2024-11-17 01:26:26,403 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-17 01:26:26,425 main DEBUG Took 0.019039 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-17 01:26:26,425 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-17 01:26:26,426 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-17 01:26:26,427 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-17 01:26:26,428 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,435 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-17 01:26:26,447 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,449 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,450 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,450 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,451 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,452 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,453 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,453 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,454 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,454 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,455 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,456 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,456 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,457 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-17 01:26:26,458 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,458 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,459 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,459 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,460 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,460 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,461 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,461 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,462 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,462 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-17 01:26:26,463 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,463 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-17 01:26:26,465 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-17 01:26:26,467 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-17 01:26:26,470 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-17 01:26:26,470 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-17 01:26:26,472 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-17 01:26:26,472 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-17 01:26:26,483 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-17 01:26:26,487 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-17 01:26:26,489 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-17 01:26:26,490 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-17 01:26:26,490 main DEBUG createAppenders(={Console}) 2024-11-17 01:26:26,491 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-17 01:26:26,491 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-17 01:26:26,492 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-17 01:26:26,492 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-17 01:26:26,493 main DEBUG OutputStream closed 2024-11-17 01:26:26,493 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-17 01:26:26,493 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-17 01:26:26,493 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-17 01:26:26,563 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-17 01:26:26,565 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-17 01:26:26,566 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-17 01:26:26,567 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-17 01:26:26,567 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-17 01:26:26,568 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-17 01:26:26,568 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-17 01:26:26,568 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-17 01:26:26,569 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-17 01:26:26,569 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-17 01:26:26,569 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-17 01:26:26,570 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-17 01:26:26,570 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-17 01:26:26,570 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-17 01:26:26,571 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-17 01:26:26,571 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-17 01:26:26,571 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-17 01:26:26,572 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-17 01:26:26,574 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-17 01:26:26,574 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-17 01:26:26,574 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-17 01:26:26,575 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-17T01:26:26,778 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b 2024-11-17 01:26:26,781 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-17 01:26:26,782 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-17T01:26:26,790 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-17T01:26:26,810 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-17T01:26:26,813 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841, deleteOnExit=true 2024-11-17T01:26:26,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-17T01:26:26,814 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/test.cache.data in system properties and HBase conf 2024-11-17T01:26:26,815 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/hadoop.tmp.dir in system properties and HBase conf 2024-11-17T01:26:26,816 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/hadoop.log.dir in system properties and HBase conf 2024-11-17T01:26:26,816 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-17T01:26:26,817 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-17T01:26:26,817 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-17T01:26:26,900 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-17T01:26:26,989 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-17T01:26:26,993 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-17T01:26:26,994 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-17T01:26:26,994 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-17T01:26:26,995 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T01:26:26,995 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-17T01:26:26,996 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-17T01:26:26,996 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-17T01:26:26,997 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T01:26:26,997 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-17T01:26:26,998 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/nfs.dump.dir in system properties and HBase conf 2024-11-17T01:26:26,998 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/java.io.tmpdir in system properties and HBase conf 2024-11-17T01:26:26,998 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-17T01:26:26,999 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-17T01:26:26,999 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-17T01:26:27,937 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-17T01:26:28,016 INFO [Time-limited test {}] log.Log(170): Logging initialized @2307ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-17T01:26:28,085 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T01:26:28,155 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T01:26:28,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T01:26:28,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T01:26:28,178 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-17T01:26:28,190 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T01:26:28,193 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/hadoop.log.dir/,AVAILABLE} 2024-11-17T01:26:28,195 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T01:26:28,388 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/java.io.tmpdir/jetty-localhost-43795-hadoop-hdfs-3_4_1-tests_jar-_-any-5727653039845102088/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T01:26:28,394 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:43795} 2024-11-17T01:26:28,394 INFO [Time-limited test {}] server.Server(415): Started @2686ms 2024-11-17T01:26:28,888 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-17T01:26:28,897 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-17T01:26:28,898 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-17T01:26:28,898 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-17T01:26:28,898 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-17T01:26:28,899 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/hadoop.log.dir/,AVAILABLE} 2024-11-17T01:26:28,900 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-17T01:26:29,012 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10ba49e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/java.io.tmpdir/jetty-localhost-41903-hadoop-hdfs-3_4_1-tests_jar-_-any-6610423301685993888/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T01:26:29,013 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:41903} 2024-11-17T01:26:29,013 INFO [Time-limited test {}] server.Server(415): Started @3305ms 2024-11-17T01:26:29,066 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-17T01:26:29,872 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841/dfs/data/data1/current/BP-349443756-172.17.0.2-1731806787529/current, will proceed with Du for space computation calculation, 2024-11-17T01:26:29,872 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841/dfs/data/data2/current/BP-349443756-172.17.0.2-1731806787529/current, will proceed with Du for space computation calculation, 2024-11-17T01:26:29,897 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-17T01:26:29,936 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x417339378b9e3f23 with lease ID 0x19b25d25c44c565b: Processing first storage report for DS-0668c0d4-4960-4f6e-a9f6-bc41593f69a0 from datanode DatanodeRegistration(127.0.0.1:40381, datanodeUuid=179fcf81-5be3-4b56-a246-03f9dcc7c88c, infoPort=34477, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=1832743990;c=1731806787529) 2024-11-17T01:26:29,937 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x417339378b9e3f23 with lease ID 0x19b25d25c44c565b: from storage DS-0668c0d4-4960-4f6e-a9f6-bc41593f69a0 node DatanodeRegistration(127.0.0.1:40381, datanodeUuid=179fcf81-5be3-4b56-a246-03f9dcc7c88c, infoPort=34477, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=1832743990;c=1731806787529), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-17T01:26:29,938 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x417339378b9e3f23 with lease ID 0x19b25d25c44c565b: Processing first storage report for DS-df8dca4b-f6c1-4aa6-9f8a-da487097f5df from datanode DatanodeRegistration(127.0.0.1:40381, datanodeUuid=179fcf81-5be3-4b56-a246-03f9dcc7c88c, infoPort=34477, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=1832743990;c=1731806787529) 2024-11-17T01:26:29,938 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x417339378b9e3f23 with lease ID 0x19b25d25c44c565b: from storage DS-df8dca4b-f6c1-4aa6-9f8a-da487097f5df node DatanodeRegistration(127.0.0.1:40381, datanodeUuid=179fcf81-5be3-4b56-a246-03f9dcc7c88c, infoPort=34477, infoSecurePort=0, ipcPort=34149, storageInfo=lv=-57;cid=testClusterID;nsid=1832743990;c=1731806787529), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-17T01:26:29,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b 
2024-11-17T01:26:30,013 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841/zookeeper_0, clientPort=63898, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-17T01:26:30,022 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=63898 2024-11-17T01:26:30,036 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T01:26:30,040 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T01:26:30,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741825_1001 (size=7) 2024-11-17T01:26:30,652 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 with version=8 2024-11-17T01:26:30,653 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/hbase-staging 2024-11-17T01:26:30,763 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-17T01:26:30,996 INFO [Time-limited test {}] client.ConnectionUtils(129): master/04f7e7347dc7:0 server-side Connection retries=45 2024-11-17T01:26:31,011 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T01:26:31,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T01:26:31,012 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T01:26:31,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T01:26:31,012 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T01:26:31,127 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T01:26:31,176 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-17T01:26:31,184 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-17T01:26:31,187 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T01:26:31,209 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 48481 (auto-detected) 2024-11-17T01:26:31,210 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-17T01:26:31,227 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33741 2024-11-17T01:26:31,234 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T01:26:31,236 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T01:26:31,247 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:33741 connecting to ZooKeeper ensemble=127.0.0.1:63898 2024-11-17T01:26:31,343 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:337410x0, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T01:26:31,345 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33741-0x10147b023e30000 connected 2024-11-17T01:26:31,425 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T01:26:31,428 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T01:26:31,431 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T01:26:31,435 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33741 2024-11-17T01:26:31,435 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33741 2024-11-17T01:26:31,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33741 2024-11-17T01:26:31,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33741 2024-11-17T01:26:31,437 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33741 
2024-11-17T01:26:31,444 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50, hbase.cluster.distributed=false 2024-11-17T01:26:31,504 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/04f7e7347dc7:0 server-side Connection retries=45 2024-11-17T01:26:31,505 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T01:26:31,505 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-17T01:26:31,505 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-17T01:26:31,505 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-17T01:26:31,505 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-17T01:26:31,507 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-17T01:26:31,510 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-17T01:26:31,511 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:37721 2024-11-17T01:26:31,512 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-17T01:26:31,517 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-17T01:26:31,518 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T01:26:31,520 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T01:26:31,524 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:37721 connecting to ZooKeeper ensemble=127.0.0.1:63898 2024-11-17T01:26:31,532 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:377210x0, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-17T01:26:31,532 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37721-0x10147b023e30001 connected 2024-11-17T01:26:31,532 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:377210x0, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T01:26:31,534 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T01:26:31,535 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-17T01:26:31,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37721 2024-11-17T01:26:31,536 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37721 2024-11-17T01:26:31,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37721 2024-11-17T01:26:31,537 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37721 2024-11-17T01:26:31,538 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37721 2024-11-17T01:26:31,545 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/04f7e7347dc7,33741,1731806790757 2024-11-17T01:26:31,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T01:26:31,557 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T01:26:31,559 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/04f7e7347dc7,33741,1731806790757 2024-11-17T01:26:31,559 DEBUG [M:0;04f7e7347dc7:33741 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;04f7e7347dc7:33741 2024-11-17T01:26:31,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T01:26:31,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-17T01:26:31,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:31,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:31,588 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T01:26:31,588 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(111): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-17T01:26:31,588 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/04f7e7347dc7,33741,1731806790757 from backup master directory 2024-11-17T01:26:31,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/04f7e7347dc7,33741,1731806790757 2024-11-17T01:26:31,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T01:26:31,599 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-17T01:26:31,599 WARN [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-17T01:26:31,600 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=04f7e7347dc7,33741,1731806790757 2024-11-17T01:26:31,603 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-17T01:26:31,605 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-17T01:26:31,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741826_1002 (size=42) 2024-11-17T01:26:32,073 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/hbase.id with ID: 024c62a6-ae3f-4682-9063-fcb68721b523 2024-11-17T01:26:32,115 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-17T01:26:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:32,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:32,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741827_1003 (size=196) 2024-11-17T01:26:32,617 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:26:32,618 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-17T01:26:32,636 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:32,641 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-17T01:26:32,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741828_1004 (size=1189) 2024-11-17T01:26:33,096 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store 2024-11-17T01:26:33,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741829_1005 (size=34) 2024-11-17T01:26:33,520 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
2024-11-17T01:26:33,521 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:26:33,522 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T01:26:33,522 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T01:26:33,522 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T01:26:33,522 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-17T01:26:33,522 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T01:26:33,523 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T01:26:33,523 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-17T01:26:33,525 WARN [master/04f7e7347dc7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/.initializing 2024-11-17T01:26:33,525 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/WALs/04f7e7347dc7,33741,1731806790757 2024-11-17T01:26:33,531 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-17T01:26:33,540 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=04f7e7347dc7%2C33741%2C1731806790757, suffix=, logDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/WALs/04f7e7347dc7,33741,1731806790757, archiveDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/oldWALs, maxLogs=10 2024-11-17T01:26:33,557 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/WALs/04f7e7347dc7,33741,1731806790757/04f7e7347dc7%2C33741%2C1731806790757.1731806793544, exclude list is [], retry=0 2024-11-17T01:26:33,571 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40381,DS-0668c0d4-4960-4f6e-a9f6-bc41593f69a0,DISK] 2024-11-17T01:26:33,574 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-17T01:26:33,604 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/WALs/04f7e7347dc7,33741,1731806790757/04f7e7347dc7%2C33741%2C1731806790757.1731806793544 2024-11-17T01:26:33,605 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34477:34477)] 2024-11-17T01:26:33,606 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:26:33,606 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:26:33,609 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T01:26:33,610 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T01:26:33,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T01:26:33,665 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-17T01:26:33,668 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:33,671 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T01:26:33,672 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T01:26:33,675 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-17T01:26:33,675 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:33,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:26:33,677 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T01:26:33,680 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-17T01:26:33,680 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:33,681 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:26:33,682 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-17T01:26:33,684 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-17T01:26:33,685 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:33,686 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:26:33,689 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T01:26:33,691 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-17T01:26:33,699 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-17T01:26:33,703 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-17T01:26:33,707 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T01:26:33,709 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74808817, jitterRate=0.11473824083805084}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-17T01:26:33,715 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-17T01:26:33,716 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-17T01:26:33,740 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c429f60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:33,767 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
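The FlushLargeStoresPolicy entry above notes that no hbase.hregion.percolumnfamilyflush.size.lower.bound is set, so the lower bound falls back to the region's memstore flush size divided by its number of column families. With the flushSize=134217728 (128 MB) reported by MasterRegionFlusherAndCompactor and the four families of master:store (info, proc, rs, state), that works out to the flushSizeLowerBound=33554432 (32 MB) logged when the region opens. A minimal check of that arithmetic:

    public class FlushLowerBound {
      public static void main(String[] args) {
        long memstoreFlushSize = 134_217_728L; // flushSize logged by MasterRegionFlusherAndCompactor (128 MB)
        int familyCount = 4;                   // master:store families: info, proc, rs, state
        long lowerBound = memstoreFlushSize / familyCount;
        System.out.println(lowerBound);        // 33554432 (32 MB), matching flushSizeLowerBound above
      }
    }
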
2024-11-17T01:26:33,778 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-17T01:26:33,778 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-17T01:26:33,780 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-17T01:26:33,781 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-17T01:26:33,786 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 4 msec 2024-11-17T01:26:33,786 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-17T01:26:33,812 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-17T01:26:33,826 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-17T01:26:33,865 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-17T01:26:33,868 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-17T01:26:33,869 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-17T01:26:33,878 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-17T01:26:33,880 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-17T01:26:33,883 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-17T01:26:33,890 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-17T01:26:33,891 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-17T01:26:33,898 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-17T01:26:33,907 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-17T01:26:33,915 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-17T01:26:33,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T01:26:33,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-17T01:26:33,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:33,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:33,924 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=04f7e7347dc7,33741,1731806790757, sessionid=0x10147b023e30000, setting cluster-up flag (Was=false) 2024-11-17T01:26:33,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:33,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:33,973 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-17T01:26:33,975 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=04f7e7347dc7,33741,1731806790757 2024-11-17T01:26:33,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:33,995 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:34,020 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-17T01:26:34,021 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=04f7e7347dc7,33741,1731806790757 2024-11-17T01:26:34,059 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;04f7e7347dc7:37721 2024-11-17T01:26:34,061 INFO 
[RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1008): ClusterId : 024c62a6-ae3f-4682-9063-fcb68721b523 2024-11-17T01:26:34,063 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-17T01:26:34,075 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-17T01:26:34,076 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-17T01:26:34,088 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-17T01:26:34,088 DEBUG [RS:0;04f7e7347dc7:37721 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25d3d569, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:34,090 DEBUG [RS:0;04f7e7347dc7:37721 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a7d036c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=04f7e7347dc7/172.17.0.2:0 2024-11-17T01:26:34,093 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-17T01:26:34,093 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-17T01:26:34,093 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-17T01:26:34,095 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(3073): reportForDuty to master=04f7e7347dc7,33741,1731806790757 with isa=04f7e7347dc7/172.17.0.2:37721, startcode=1731806791503 2024-11-17T01:26:34,100 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-17T01:26:34,105 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-17T01:26:34,107 DEBUG [RS:0;04f7e7347dc7:37721 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T01:26:34,108 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
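The ZKUtil entries a little earlier ("Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error)", and the same for /hbase/normalizer, /hbase/switch/split, /hbase/switch/merge and /hbase/snapshot-cleanup) are plain existence probes against the ZooKeeper quorum at 127.0.0.1:63898. A minimal sketch of the same kind of probe using the stock ZooKeeper client rather than the HBase-internal ZKUtil API; the session timeout is an arbitrary choice, and znode contents may be binary protobuf rather than readable text:

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbe {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum address taken from this log; any reachable ensemble works.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:63898", 30_000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        for (String path : new String[] { "/hbase/balancer", "/hbase/normalizer",
            "/hbase/switch/split", "/hbase/switch/merge", "/hbase/snapshot-cleanup" }) {
          Stat stat = zk.exists(path, false); // null when the znode has never been created
          System.out.println(path + " -> " + (stat == null
              ? "absent (not necessarily an error)"
              : stat.getDataLength() + " bytes"));
        }
        zk.close();
      }
    }
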
2024-11-17T01:26:34,112 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 04f7e7347dc7,33741,1731806790757 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-17T01:26:34,116 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/04f7e7347dc7:0, corePoolSize=5, maxPoolSize=5 2024-11-17T01:26:34,116 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/04f7e7347dc7:0, corePoolSize=5, maxPoolSize=5 2024-11-17T01:26:34,116 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/04f7e7347dc7:0, corePoolSize=5, maxPoolSize=5 2024-11-17T01:26:34,117 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/04f7e7347dc7:0, corePoolSize=5, maxPoolSize=5 2024-11-17T01:26:34,117 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/04f7e7347dc7:0, corePoolSize=10, maxPoolSize=10 2024-11-17T01:26:34,117 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,117 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/04f7e7347dc7:0, corePoolSize=2, maxPoolSize=2 2024-11-17T01:26:34,118 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,123 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-17T01:26:34,123 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-17T01:26:34,127 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731806824127 2024-11-17T01:26:34,127 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:34,128 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B 
(8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T01:26:34,129 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-17T01:26:34,130 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-17T01:26:34,133 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-17T01:26:34,134 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-17T01:26:34,134 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-17T01:26:34,134 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-17T01:26:34,137 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
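The table descriptor written above for hbase:meta is produced internally by FSTableDescriptors, but its column-family attributes (VERSIONS, IN_MEMORY, BLOCKSIZE, BLOOMFILTER, DATA_BLOCK_ENCODING) map directly onto the public client builders. A minimal sketch that mirrors the 'info' family settings on a hypothetical user table named "example"; the table name and the idea of creating it from a client are illustrative, not part of this run:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                    // VERSIONS => '3'
                .setInMemory(true)                                    // IN_MEMORY => 'true'
                .setBlocksize(8192)                                   // BLOCKSIZE => '8192'
                .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
                .build())
            .build();
        System.out.println(td);
      }
    }
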
2024-11-17T01:26:34,139 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-17T01:26:34,141 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-17T01:26:34,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741831_1007 (size=1039) 2024-11-17T01:26:34,141 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-17T01:26:34,143 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-17T01:26:34,143 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60641, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T01:26:34,144 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-17T01:26:34,148 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/04f7e7347dc7:0:becomeActiveMaster-HFileCleaner.large.0-1731806794145,5,FailOnTimeoutGroup] 2024-11-17T01:26:34,148 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/04f7e7347dc7:0:becomeActiveMaster-HFileCleaner.small.0-1731806794148,5,FailOnTimeoutGroup] 2024-11-17T01:26:34,148 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:34,148 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-17T01:26:34,149 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:34,150 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:34,150 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33741 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:34,172 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-17T01:26:34,172 WARN [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-17T01:26:34,275 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(3073): reportForDuty to master=04f7e7347dc7,33741,1731806790757 with isa=04f7e7347dc7/172.17.0.2:37721, startcode=1731806791503 2024-11-17T01:26:34,278 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33741 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:34,283 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33741 {}] master.ServerManager(486): Registering regionserver=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:34,292 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 2024-11-17T01:26:34,292 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:37251 2024-11-17T01:26:34,292 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-17T01:26:34,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T01:26:34,304 DEBUG [RS:0;04f7e7347dc7:37721 {}] zookeeper.ZKUtil(111): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:34,304 WARN [RS:0;04f7e7347dc7:37721 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
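The ServerNotRunningYetException above is the normal outcome of a region server calling reportForDuty before the master has finished starting; the region server logs "sleeping 100 ms and then retrying" and succeeds on the next attempt. A generic sketch of that retry shape, where the helper name and structure are purely illustrative and not HBase API:

    import java.util.concurrent.Callable;

    public class RetrySketch {
      // Illustrative helper: try, sleep a fixed 100 ms on failure, try again
      // until the call succeeds (here bounded only by the caller, as in the test).
      static <T> T retryUntilAvailable(Callable<T> call) throws InterruptedException {
        while (true) {
          try {
            return call.call();
          } catch (InterruptedException ie) {
            throw ie;
          } catch (Exception serverNotRunningYet) {
            Thread.sleep(100L); // matches "sleeping 100 ms and then retrying"
          }
        }
      }
    }
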
2024-11-17T01:26:34,304 INFO [RS:0;04f7e7347dc7:37721 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-17T01:26:34,305 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/WALs/04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:34,306 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [04f7e7347dc7,37721,1731806791503] 2024-11-17T01:26:34,321 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-17T01:26:34,332 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-17T01:26:34,346 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-17T01:26:34,348 INFO [RS:0;04f7e7347dc7:37721 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-17T01:26:34,348 INFO [RS:0;04f7e7347dc7:37721 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:34,349 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-17T01:26:34,354 INFO [RS:0;04f7e7347dc7:37721 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
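The MemStoreFlusher entry above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Assuming the usual behaviour where the low mark defaults to 95% of the global limit (conventionally governed by hbase.regionserver.global.memstore.size.lower.limit; the exact key may vary by version), the logged figure is simply 95% of 880 M:

    public class MemStoreMarks {
      public static void main(String[] args) {
        long globalLimitMb = 880;          // globalMemStoreLimit from the log
        double lowerLimitFraction = 0.95;  // assumed default lower-limit fraction
        System.out.println(Math.round(globalLimitMb * lowerLimitFraction)); // 836, as logged
      }
    }
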
2024-11-17T01:26:34,354 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,355 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,355 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,355 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,355 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,355 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/04f7e7347dc7:0, corePoolSize=2, maxPoolSize=2 2024-11-17T01:26:34,355 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,355 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,356 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,356 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,356 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/04f7e7347dc7:0, corePoolSize=1, maxPoolSize=1 2024-11-17T01:26:34,356 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/04f7e7347dc7:0, corePoolSize=3, maxPoolSize=3 2024-11-17T01:26:34,356 DEBUG [RS:0;04f7e7347dc7:37721 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0, corePoolSize=3, maxPoolSize=3 2024-11-17T01:26:34,357 INFO [RS:0;04f7e7347dc7:37721 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:34,357 INFO [RS:0;04f7e7347dc7:37721 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:34,357 INFO [RS:0;04f7e7347dc7:37721 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:34,357 INFO [RS:0;04f7e7347dc7:37721 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:34,357 INFO [RS:0;04f7e7347dc7:37721 {}] hbase.ChoreService(168): Chore ScheduledChore name=04f7e7347dc7,37721,1731806791503-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-17T01:26:34,383 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-17T01:26:34,385 INFO [RS:0;04f7e7347dc7:37721 {}] hbase.ChoreService(168): Chore ScheduledChore name=04f7e7347dc7,37721,1731806791503-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:34,403 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.Replication(204): 04f7e7347dc7,37721,1731806791503 started 2024-11-17T01:26:34,403 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1767): Serving as 04f7e7347dc7,37721,1731806791503, RpcServer on 04f7e7347dc7/172.17.0.2:37721, sessionid=0x10147b023e30001 2024-11-17T01:26:34,404 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-17T01:26:34,404 DEBUG [RS:0;04f7e7347dc7:37721 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:34,404 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '04f7e7347dc7,37721,1731806791503' 2024-11-17T01:26:34,404 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-17T01:26:34,405 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-17T01:26:34,406 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-17T01:26:34,406 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-17T01:26:34,406 DEBUG [RS:0;04f7e7347dc7:37721 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:34,406 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '04f7e7347dc7,37721,1731806791503' 2024-11-17T01:26:34,406 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-17T01:26:34,407 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-17T01:26:34,408 DEBUG [RS:0;04f7e7347dc7:37721 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-17T01:26:34,408 INFO [RS:0;04f7e7347dc7:37721 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-17T01:26:34,408 INFO [RS:0;04f7e7347dc7:37721 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-17T01:26:34,515 INFO [RS:0;04f7e7347dc7:37721 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-17T01:26:34,519 INFO [RS:0;04f7e7347dc7:37721 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=04f7e7347dc7%2C37721%2C1731806791503, suffix=, logDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/WALs/04f7e7347dc7,37721,1731806791503, archiveDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/oldWALs, maxLogs=32 2024-11-17T01:26:34,534 DEBUG [RS:0;04f7e7347dc7:37721 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/WALs/04f7e7347dc7,37721,1731806791503/04f7e7347dc7%2C37721%2C1731806791503.1731806794521, exclude list is [], retry=0 2024-11-17T01:26:34,538 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40381,DS-0668c0d4-4960-4f6e-a9f6-bc41593f69a0,DISK] 2024-11-17T01:26:34,542 INFO [RS:0;04f7e7347dc7:37721 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/WALs/04f7e7347dc7,37721,1731806791503/04f7e7347dc7%2C37721%2C1731806791503.1731806794521 2024-11-17T01:26:34,542 DEBUG [RS:0;04f7e7347dc7:37721 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34477:34477)] 2024-11-17T01:26:34,543 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-17T01:26:34,544 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 2024-11-17T01:26:34,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741833_1009 (size=32) 2024-11-17T01:26:34,956 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:26:34,961 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T01:26:34,965 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T01:26:34,966 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:34,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T01:26:34,967 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T01:26:34,971 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T01:26:34,971 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:34,972 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T01:26:34,972 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T01:26:34,974 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T01:26:34,974 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:34,975 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T01:26:34,977 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740 2024-11-17T01:26:34,978 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740 2024-11-17T01:26:34,982 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
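The "Found 0 recovered edits file(s)" entries above are the region open path checking for leftover WAL-split output under the region directory; only after this open does WALSplitUtil write a recovered.edits/1.seqid marker. A rough way to look at the same directory with the Hadoop FileSystem API, noting this is not the exact logic HRegion uses and the paths are copied from this log:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class RecoveredEditsCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37251"), conf);
        Path editsDir = new Path(
            "/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/recovered.edits");
        if (!fs.exists(editsDir)) {
          System.out.println("Found 0 recovered edits file(s)");
          return;
        }
        FileStatus[] files = fs.listStatus(editsDir);
        System.out.println("Found " + files.length + " recovered edits file(s)");
      }
    }
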
2024-11-17T01:26:34,984 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-17T01:26:34,988 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T01:26:34,989 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66181747, jitterRate=-0.013815119862556458}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:26:34,991 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-17T01:26:34,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-17T01:26:34,992 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-17T01:26:34,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-17T01:26:34,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T01:26:34,992 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T01:26:34,993 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-17T01:26:34,993 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-17T01:26:34,995 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-17T01:26:34,995 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-17T01:26:35,000 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-17T01:26:35,007 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-17T01:26:35,010 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-17T01:26:35,164 DEBUG [04f7e7347dc7:33741 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-17T01:26:35,171 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:35,176 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 04f7e7347dc7,37721,1731806791503, state=OPENING 2024-11-17T01:26:35,215 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-17T01:26:35,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:35,223 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:35,225 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T01:26:35,225 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T01:26:35,229 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:26:35,405 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:35,406 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-17T01:26:35,409 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34134, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-17T01:26:35,420 INFO [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-17T01:26:35,420 INFO [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-17T01:26:35,421 INFO [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-17T01:26:35,424 INFO [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=04f7e7347dc7%2C37721%2C1731806791503.meta, suffix=.meta, logDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/WALs/04f7e7347dc7,37721,1731806791503, archiveDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/oldWALs, maxLogs=32 2024-11-17T01:26:35,439 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/WALs/04f7e7347dc7,37721,1731806791503/04f7e7347dc7%2C37721%2C1731806791503.meta.1731806795426.meta, exclude list is [], retry=0 2024-11-17T01:26:35,442 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:40381,DS-0668c0d4-4960-4f6e-a9f6-bc41593f69a0,DISK] 2024-11-17T01:26:35,445 INFO [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/WALs/04f7e7347dc7,37721,1731806791503/04f7e7347dc7%2C37721%2C1731806791503.meta.1731806795426.meta 2024-11-17T01:26:35,445 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:34477:34477)] 2024-11-17T01:26:35,446 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:26:35,447 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-17T01:26:35,496 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-17T01:26:35,500 INFO [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-17T01:26:35,504 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-17T01:26:35,504 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:26:35,504 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-17T01:26:35,504 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-17T01:26:35,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-17T01:26:35,509 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-17T01:26:35,509 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:35,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T01:26:35,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-17T01:26:35,512 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-17T01:26:35,512 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:35,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T01:26:35,513 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-17T01:26:35,514 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-17T01:26:35,514 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:35,515 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-17T01:26:35,517 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740 2024-11-17T01:26:35,519 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740 2024-11-17T01:26:35,522 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T01:26:35,525 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-17T01:26:35,526 INFO [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66288569, jitterRate=-0.012223348021507263}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:26:35,528 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-17T01:26:35,534 INFO [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731806795401 2024-11-17T01:26:35,543 DEBUG [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-17T01:26:35,544 INFO [RS_OPEN_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-17T01:26:35,544 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:35,546 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 04f7e7347dc7,37721,1731806791503, state=OPEN 2024-11-17T01:26:35,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T01:26:35,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-17T01:26:35,573 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T01:26:35,573 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-17T01:26:35,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-17T01:26:35,581 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=04f7e7347dc7,37721,1731806791503 in 345 msec 2024-11-17T01:26:35,590 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-17T01:26:35,591 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 583 msec 2024-11-17T01:26:35,597 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5400 sec 2024-11-17T01:26:35,597 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731806795597, completionTime=-1 2024-11-17T01:26:35,597 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-17T01:26:35,598 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-17T01:26:35,628 DEBUG [hconnection-0x526f2908-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:35,631 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34146, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:35,640 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-17T01:26:35,640 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731806855640 2024-11-17T01:26:35,640 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731806915640 2024-11-17T01:26:35,640 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 42 msec 2024-11-17T01:26:35,675 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=04f7e7347dc7,33741,1731806790757-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:35,675 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=04f7e7347dc7,33741,1731806790757-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:35,675 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=04f7e7347dc7,33741,1731806790757-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:35,676 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-04f7e7347dc7:33741, period=300000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:35,677 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-17T01:26:35,681 DEBUG [master/04f7e7347dc7:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-17T01:26:35,684 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-17T01:26:35,685 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-17T01:26:35,691 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-17T01:26:35,694 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T01:26:35,695 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:35,697 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T01:26:35,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741835_1011 (size=358) 2024-11-17T01:26:36,118 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 60ebde759866ffdd749c0e1b676599ae, NAME => 'hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 2024-11-17T01:26:36,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741836_1012 (size=42) 2024-11-17T01:26:36,532 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:26:36,533 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 60ebde759866ffdd749c0e1b676599ae, disabling compactions & flushes 2024-11-17T01:26:36,533 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 2024-11-17T01:26:36,533 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 2024-11-17T01:26:36,534 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 
after waiting 0 ms 2024-11-17T01:26:36,534 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 2024-11-17T01:26:36,534 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 2024-11-17T01:26:36,534 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 60ebde759866ffdd749c0e1b676599ae: 2024-11-17T01:26:36,539 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T01:26:36,546 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1731806796540"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731806796540"}]},"ts":"1731806796540"} 2024-11-17T01:26:36,566 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-17T01:26:36,568 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T01:26:36,571 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806796568"}]},"ts":"1731806796568"} 2024-11-17T01:26:36,575 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-17T01:26:36,627 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=60ebde759866ffdd749c0e1b676599ae, ASSIGN}] 2024-11-17T01:26:36,631 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=60ebde759866ffdd749c0e1b676599ae, ASSIGN 2024-11-17T01:26:36,635 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=60ebde759866ffdd749c0e1b676599ae, ASSIGN; state=OFFLINE, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=false 2024-11-17T01:26:36,785 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=60ebde759866ffdd749c0e1b676599ae, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:36,791 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 60ebde759866ffdd749c0e1b676599ae, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:26:36,950 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:36,965 INFO [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 2024-11-17T01:26:36,965 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 60ebde759866ffdd749c0e1b676599ae, NAME => 'hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:26:36,965 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:26:36,965 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:26:36,966 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:26:36,966 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:26:36,968 INFO [StoreOpener-60ebde759866ffdd749c0e1b676599ae-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:26:36,970 INFO [StoreOpener-60ebde759866ffdd749c0e1b676599ae-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 60ebde759866ffdd749c0e1b676599ae columnFamilyName info 2024-11-17T01:26:36,971 DEBUG [StoreOpener-60ebde759866ffdd749c0e1b676599ae-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:36,971 INFO [StoreOpener-60ebde759866ffdd749c0e1b676599ae-1 {}] regionserver.HStore(327): Store=60ebde759866ffdd749c0e1b676599ae/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:26:36,973 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/namespace/60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:26:36,974 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/namespace/60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:26:36,978 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:26:36,982 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/namespace/60ebde759866ffdd749c0e1b676599ae/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T01:26:36,983 INFO [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 60ebde759866ffdd749c0e1b676599ae; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67029573, jitterRate=-0.0011815279722213745}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-17T01:26:36,985 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 60ebde759866ffdd749c0e1b676599ae: 2024-11-17T01:26:36,986 INFO [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae., pid=6, masterSystemTime=1731806796950 2024-11-17T01:26:36,989 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 2024-11-17T01:26:36,990 INFO [RS_OPEN_PRIORITY_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 
2024-11-17T01:26:36,991 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=60ebde759866ffdd749c0e1b676599ae, regionState=OPEN, openSeqNum=2, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:36,998 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-17T01:26:36,999 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 60ebde759866ffdd749c0e1b676599ae, server=04f7e7347dc7,37721,1731806791503 in 203 msec 2024-11-17T01:26:37,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-17T01:26:37,001 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=60ebde759866ffdd749c0e1b676599ae, ASSIGN in 371 msec 2024-11-17T01:26:37,003 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T01:26:37,003 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806797003"}]},"ts":"1731806797003"} 2024-11-17T01:26:37,006 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-17T01:26:37,016 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T01:26:37,019 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3300 sec 2024-11-17T01:26:37,097 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-17T01:26:37,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-17T01:26:37,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:37,132 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:26:37,178 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-17T01:26:37,203 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-17T01:26:37,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 40 msec 2024-11-17T01:26:37,222 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-17T01:26:37,245 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-17T01:26:37,259 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 34 msec 2024-11-17T01:26:37,286 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-17T01:26:37,303 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-17T01:26:37,304 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 5.703sec 2024-11-17T01:26:37,306 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-17T01:26:37,309 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-17T01:26:37,311 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-17T01:26:37,311 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-17T01:26:37,311 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-17T01:26:37,312 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=04f7e7347dc7,33741,1731806790757-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-17T01:26:37,312 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=04f7e7347dc7,33741,1731806790757-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-17T01:26:37,319 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-17T01:26:37,319 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-17T01:26:37,320 INFO [master/04f7e7347dc7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=04f7e7347dc7,33741,1731806790757-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
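The lines that follow show the test client attaching to this mini-cluster: a ReadOnlyZKClient connect to 127.0.0.1:63898 and then SIMPLE-authenticated ClientService/MasterService connections. As a minimal, hedged sketch of what such a client-side connection looks like with the standard HBase 2.x API (the quorum host and client port are taken from the log; the class name and everything else here is illustrative, not the test's actual code):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Quorum and client port as seen in the ReadOnlyZKClient lines below.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "63898");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Admin operations (create table, flush, ...) would be issued here.
          System.out.println("cluster id: " + admin.getClusterMetrics().getClusterId());
        }
      }
    }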
2024-11-17T01:26:37,368 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3771e354 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38630296 2024-11-17T01:26:37,369 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-17T01:26:37,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6321da62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:37,387 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-17T01:26:37,387 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-17T01:26:37,396 DEBUG [hconnection-0x13adb0ff-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:37,403 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:37,411 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=04f7e7347dc7,33741,1731806790757 2024-11-17T01:26:37,425 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=153, ProcessCount=11, AvailableMemoryMB=4571 2024-11-17T01:26:37,435 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T01:26:37,438 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48980, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T01:26:37,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
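Two warnings appear above: the TableDescriptorChecker note that the 131072-byte (128 KB) MEMSTORE_FLUSHSIZE carried by the descriptor is unusually small (presumably chosen by the test to force frequent flushes), and the ZKConnectionRegistry deprecation, which points at the client.rpcconnectionregistry section of the reference guide. A hedged sketch of the client-side switch that deprecation suggests follows; the registry implementation class and both config keys are assumptions based on that guide section, not something this test sets:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class RegistryConfigSketch {
      public static Configuration rpcRegistryConf() {
        Configuration conf = HBaseConfiguration.create();
        // Assumed key/class per the referenced guide section: bootstrap the
        // client through cluster RPC endpoints instead of ZooKeeper.
        conf.set("hbase.client.registry.impl",
            "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
        // Assumed key; the endpoints mirror the master/regionserver in this log.
        conf.set("hbase.client.bootstrap.servers",
            "04f7e7347dc7:33741,04f7e7347dc7:37721");
        return conf;
      }
    }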
2024-11-17T01:26:37,450 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:26:37,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-17T01:26:37,454 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T01:26:37,454 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:37,456 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-17T01:26:37,456 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T01:26:37,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-17T01:26:37,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741837_1013 (size=960) 2024-11-17T01:26:37,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-17T01:26:37,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-17T01:26:37,875 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 2024-11-17T01:26:37,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741838_1014 (size=53) 2024-11-17T01:26:38,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-17T01:26:38,292 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:26:38,293 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 733401ed1ccb71c159e3f227c30cedc7, disabling compactions & flushes 2024-11-17T01:26:38,293 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:38,293 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:38,293 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. after waiting 0 ms 2024-11-17T01:26:38,293 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:38,294 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
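The descriptor echoed above (first by HMaster for the create request, then by RegionOpenAndInit when the region is instantiated) spells out the TestAcidGuarantees schema: column families A, B and C with VERSIONS => '1' and 64 KB blocks, plus the table attribute hbase.hregion.compacting.memstore.type => 'BASIC' that later puts each store on a CompactingMemStore. A hedged sketch of building an equivalent descriptor with the HBase 2.x client API (the values are copied from the log; the builder code itself is illustrative, not the test's code):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      static TableDescriptor descriptor() {
        TableDescriptorBuilder builder =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table attribute from the log: BASIC in-memory compaction.
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
          ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setMaxVersions(1)     // VERSIONS => '1'
              .setBlocksize(65536)   // BLOCKSIZE => '65536 B (64KB)'
              .build();
          builder.setColumnFamily(cf);
        }
        return builder.build();
      }

      static void create(Admin admin) throws java.io.IOException {
        // Drives a CreateTableProcedure like the one logged as pid=9.
        admin.createTable(descriptor());
      }
    }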
2024-11-17T01:26:38,294 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:38,296 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T01:26:38,297 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731806798296"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731806798296"}]},"ts":"1731806798296"} 2024-11-17T01:26:38,301 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-17T01:26:38,303 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T01:26:38,303 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806798303"}]},"ts":"1731806798303"} 2024-11-17T01:26:38,306 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-17T01:26:38,357 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=733401ed1ccb71c159e3f227c30cedc7, ASSIGN}] 2024-11-17T01:26:38,360 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=733401ed1ccb71c159e3f227c30cedc7, ASSIGN 2024-11-17T01:26:38,363 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=733401ed1ccb71c159e3f227c30cedc7, ASSIGN; state=OFFLINE, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=false 2024-11-17T01:26:38,515 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=733401ed1ccb71c159e3f227c30cedc7, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:38,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:26:38,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-17T01:26:38,677 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:38,689 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:38,690 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:26:38,690 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:38,690 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:26:38,691 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:38,691 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:38,693 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:38,697 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:26:38,698 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 733401ed1ccb71c159e3f227c30cedc7 columnFamilyName A 2024-11-17T01:26:38,698 DEBUG [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:38,699 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] regionserver.HStore(327): Store=733401ed1ccb71c159e3f227c30cedc7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:26:38,699 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:38,702 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:26:38,702 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 733401ed1ccb71c159e3f227c30cedc7 columnFamilyName B 2024-11-17T01:26:38,702 DEBUG [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:38,703 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] regionserver.HStore(327): Store=733401ed1ccb71c159e3f227c30cedc7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:26:38,703 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:38,705 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:26:38,706 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 733401ed1ccb71c159e3f227c30cedc7 columnFamilyName C 2024-11-17T01:26:38,706 DEBUG [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:26:38,706 INFO [StoreOpener-733401ed1ccb71c159e3f227c30cedc7-1 {}] regionserver.HStore(327): Store=733401ed1ccb71c159e3f227c30cedc7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:26:38,707 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:38,708 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:38,709 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:38,712 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T01:26:38,714 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:38,717 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T01:26:38,718 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 733401ed1ccb71c159e3f227c30cedc7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58923809, jitterRate=-0.12196682393550873}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:26:38,719 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:38,720 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., pid=11, masterSystemTime=1731806798677 2024-11-17T01:26:38,723 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:38,723 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:38,724 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=733401ed1ccb71c159e3f227c30cedc7, regionState=OPEN, openSeqNum=2, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:38,729 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-17T01:26:38,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 in 204 msec 2024-11-17T01:26:38,733 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-17T01:26:38,733 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=733401ed1ccb71c159e3f227c30cedc7, ASSIGN in 372 msec 2024-11-17T01:26:38,734 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T01:26:38,734 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806798734"}]},"ts":"1731806798734"} 2024-11-17T01:26:38,737 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-17T01:26:38,776 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T01:26:38,782 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.3260 sec 2024-11-17T01:26:39,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-17T01:26:39,591 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-17T01:26:39,601 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63607639 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e67f019 2024-11-17T01:26:39,642 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fcb5f29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,646 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,649 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53544, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,654 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T01:26:39,656 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38982, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T01:26:39,664 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53b8a93e to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5095ba91 2024-11-17T01:26:39,674 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f2091cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,675 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-11-17T01:26:39,688 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22cb07dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,690 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1324ee83 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62c43377 2024-11-17T01:26:39,699 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,702 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-11-17T01:26:39,713 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5400112e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,716 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x048068a5 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a8f4734 2024-11-17T01:26:39,730 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,735 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b5f27aa to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10c964e8 2024-11-17T01:26:39,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9ed28bb, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,747 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-11-17T01:26:39,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,760 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x490457fd to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-11-17T01:26:39,771 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,775 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-11-17T01:26:39,787 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:26:39,792 DEBUG [hconnection-0x76910286-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,796 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53560, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,797 DEBUG [hconnection-0x1a036934-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,797 DEBUG [hconnection-0x1b94e7c8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,803 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53570, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,804 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,805 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:26:39,808 DEBUG [hconnection-0x63f71b8d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,811 DEBUG [hconnection-0x53bc7585-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-17T01:26:39,813 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:26:39,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-17T01:26:39,815 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:26:39,816 DEBUG [hconnection-0x2cbdc5ac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,817 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:26:39,818 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53582, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,821 DEBUG [hconnection-0x588b494f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,822 DEBUG [hconnection-0x2a80f7f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,823 DEBUG [hconnection-0x1b171e57-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:26:39,829 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53586, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,832 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53600, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,837 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53616, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,838 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53630, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,840 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53644, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:26:39,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:39,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:26:39,890 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:39,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:39,891 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:39,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:39,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:39,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:39,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-17T01:26:39,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:39,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:39,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:39,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:39,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:39,993 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
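[Editor's note] The entries above show a client-requested flush of TestAcidGuarantees being turned into FlushTableProcedure pid=12 with a FlushRegionProcedure sub-procedure (pid=13), while MemStoreFlusher.0 is already flushing the region's three column families (A, B, C) to disk. As an illustrative sketch only (the table name comes from the log; the configuration source and everything else here are assumptions), a flush like the one logged here can be requested through the public HBase Admin API:

// Illustrative sketch, not part of the test run logged here.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Reads hbase-site.xml from the classpath; assumed to point at the test cluster.
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush the table; the master runs it as a flush
      // procedure with one sub-procedure per region, as seen in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}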
2024-11-17T01:26:39,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,000 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/a89265a5b4904c84b4b9f85bfa7e4a6f is 50, key is test_row_0/A:col10/1731806799841/Put/seqid=0 2024-11-17T01:26:40,012 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806859998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,015 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806860003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53582 deadline: 1731806860002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53600 deadline: 1731806860007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806860009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741839_1015 (size=19021) 2024-11-17T01:26:40,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/a89265a5b4904c84b4b9f85bfa7e4a6f 2024-11-17T01:26:40,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-17T01:26:40,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806860118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,124 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806860120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53600 deadline: 1731806860120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53582 deadline: 1731806860121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,128 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806860126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,150 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/aa961b8b10ae499dad6bdca5860115bf is 50, key is test_row_0/B:col10/1731806799841/Put/seqid=0 2024-11-17T01:26:40,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741840_1016 (size=12001) 2024-11-17T01:26:40,182 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,183 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:40,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:40,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
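[Editor's note] The repeated WARN/DEBUG pairs above show the region server rejecting Mutate calls with RegionTooBusyException because the region's memstore is over its 512 KB blocking limit while the flush is still in progress. A minimal client-side sketch of writing to the same table and backing off on that exception follows; the row, family, qualifier and retry policy are assumptions for illustration, and in practice the HBase client's own retry machinery usually handles this exception internally:

// Illustrative sketch, not part of the test run logged here.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/family/qualifier mirror the pattern seen in the log; the value is made up.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Memstore is over its blocking limit; wait and let the flush drain it.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}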
2024-11-17T01:26:40,191 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,326 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-17T01:26:40,327 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-17T01:26:40,328 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-17T01:26:40,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806860338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806860338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,343 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806860341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53600 deadline: 1731806860342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53582 deadline: 1731806860342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,347 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:40,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:40,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-17T01:26:40,505 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:40,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:40,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,507 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,568 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/aa961b8b10ae499dad6bdca5860115bf 2024-11-17T01:26:40,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/ffb623cc5cd84779a672ec98ce9d9bad is 50, key is test_row_0/C:col10/1731806799841/Put/seqid=0 2024-11-17T01:26:40,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806860646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,648 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53600 deadline: 1731806860647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,651 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806860649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,653 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806860651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,654 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:40,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53582 deadline: 1731806860651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,660 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:40,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:40,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,661 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741841_1017 (size=12001) 2024-11-17T01:26:40,816 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:40,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:40,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-17T01:26:40,972 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:40,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:40,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:40,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:40,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:40,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:41,017 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-17T01:26:41,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/ffb623cc5cd84779a672ec98ce9d9bad 2024-11-17T01:26:41,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/a89265a5b4904c84b4b9f85bfa7e4a6f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a89265a5b4904c84b4b9f85bfa7e4a6f 2024-11-17T01:26:41,114 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a89265a5b4904c84b4b9f85bfa7e4a6f, entries=300, sequenceid=13, filesize=18.6 K 2024-11-17T01:26:41,122 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/aa961b8b10ae499dad6bdca5860115bf as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/aa961b8b10ae499dad6bdca5860115bf 2024-11-17T01:26:41,136 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:41,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:41,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:41,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:41,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:41,137 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:41,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:41,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:41,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/aa961b8b10ae499dad6bdca5860115bf, entries=150, sequenceid=13, filesize=11.7 K 2024-11-17T01:26:41,146 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/ffb623cc5cd84779a672ec98ce9d9bad as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/ffb623cc5cd84779a672ec98ce9d9bad 2024-11-17T01:26:41,168 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:41,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806861156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:41,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:41,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53600 deadline: 1731806861156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:41,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:41,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806861156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:41,173 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:41,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53582 deadline: 1731806861160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:41,173 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/ffb623cc5cd84779a672ec98ce9d9bad, entries=150, sequenceid=13, filesize=11.7 K 2024-11-17T01:26:41,174 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-17T01:26:41,174 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-17T01:26:41,175 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 733401ed1ccb71c159e3f227c30cedc7 in 1297ms, sequenceid=13, compaction requested=false 2024-11-17T01:26:41,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:41,176 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-17T01:26:41,176 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-17T01:26:41,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:41,178 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T01:26:41,178 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics 
about HBase RegionObservers 2024-11-17T01:26:41,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-17T01:26:41,178 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-17T01:26:41,178 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-17T01:26:41,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:41,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:41,178 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:41,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:41,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:41,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:41,180 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-17T01:26:41,180 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-17T01:26:41,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/da8dcf6fb30c4b2097072b9fda58cc05 is 50, key is test_row_0/A:col10/1731806799997/Put/seqid=0 2024-11-17T01:26:41,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741842_1018 (size=14341) 2024-11-17T01:26:41,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/da8dcf6fb30c4b2097072b9fda58cc05 2024-11-17T01:26:41,251 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/409f91929421410aa840d931f08f9c3a is 50, key is test_row_0/B:col10/1731806799997/Put/seqid=0 2024-11-17T01:26:41,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741843_1019 (size=12001) 2024-11-17T01:26:41,266 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), 
to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/409f91929421410aa840d931f08f9c3a 2024-11-17T01:26:41,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:41,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806861273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:41,290 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:41,291 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:41,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:41,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:41,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:41,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:41,292 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/2bc3879a31574ba787bd9962a4b955a5 is 50, key is test_row_0/C:col10/1731806799997/Put/seqid=0 2024-11-17T01:26:41,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:41,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
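Annotation: pid=13 is the per-region FlushRegionProcedure spawned by a client-requested table flush (its parent, pid=12, completes a little later in the log). The "Unable to complete flush ... as already flushing" failures reported to the master are not fatal; the procedure is simply re-dispatched until the concurrent memstore flush finishes. A sketch of how such a flush is requested, assuming the standard HBase 2.x Admin API (connection setup is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws IOException {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Stores a FlushTableProcedure on the master, which fans out one
                // FlushRegionProcedure (pid=13 above) per region of the table.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }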
2024-11-17T01:26:41,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741844_1020 (size=12001) 2024-11-17T01:26:41,309 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/2bc3879a31574ba787bd9962a4b955a5 2024-11-17T01:26:41,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/da8dcf6fb30c4b2097072b9fda58cc05 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/da8dcf6fb30c4b2097072b9fda58cc05 2024-11-17T01:26:41,367 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/da8dcf6fb30c4b2097072b9fda58cc05, entries=200, sequenceid=38, filesize=14.0 K 2024-11-17T01:26:41,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/409f91929421410aa840d931f08f9c3a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/409f91929421410aa840d931f08f9c3a 2024-11-17T01:26:41,389 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806861384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:41,398 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/409f91929421410aa840d931f08f9c3a, entries=150, sequenceid=38, filesize=11.7 K 2024-11-17T01:26:41,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/2bc3879a31574ba787bd9962a4b955a5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2bc3879a31574ba787bd9962a4b955a5 2024-11-17T01:26:41,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2bc3879a31574ba787bd9962a4b955a5, entries=150, sequenceid=38, filesize=11.7 K 2024-11-17T01:26:41,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 733401ed1ccb71c159e3f227c30cedc7 in 244ms, sequenceid=38, compaction requested=false 2024-11-17T01:26:41,422 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:41,446 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:41,446 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-17T01:26:41,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
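Annotation: the flush that completes above follows the usual two-step pattern: each store's memstore snapshot is written to an HFile under the region's .tmp directory, then committed into the A/B/C store directories and reported with its entry count, sequence id and file size. The cells being flushed come from puts of the shape visible in the HFile keys (row test_row_0, qualifier col10 in families A, B and C). A sketch of such a write, assuming the standard HBase 2.x client API; the value bytes are illustrative:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AcidGuaranteesStyleWriter {
        public static void main(String[] args) throws IOException {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // One row mutated across all three column families in a single atomic Put,
                // mirroring the test_row_0/A:col10, B:col10 and C:col10 cells in the flush output.
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                table.put(put);
            }
        }
    }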
2024-11-17T01:26:41,447 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:26:41,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:41,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:41,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:41,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:41,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:41,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:41,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/29f67f76eab34cc494388e8d733fac83 is 50, key is test_row_0/A:col10/1731806801210/Put/seqid=0 2024-11-17T01:26:41,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741845_1021 (size=12001) 2024-11-17T01:26:41,486 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/29f67f76eab34cc494388e8d733fac83 2024-11-17T01:26:41,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/e32d915eb34d45d2ac6ddc9e4132d5f6 is 50, key is test_row_0/B:col10/1731806801210/Put/seqid=0 2024-11-17T01:26:41,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741846_1022 (size=12001) 2024-11-17T01:26:41,526 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), 
to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/e32d915eb34d45d2ac6ddc9e4132d5f6 2024-11-17T01:26:41,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/0fcffae1390646ddadd60cd562f96c84 is 50, key is test_row_0/C:col10/1731806801210/Put/seqid=0 2024-11-17T01:26:41,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741847_1023 (size=12001) 2024-11-17T01:26:41,563 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/0fcffae1390646ddadd60cd562f96c84 2024-11-17T01:26:41,577 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/29f67f76eab34cc494388e8d733fac83 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/29f67f76eab34cc494388e8d733fac83 2024-11-17T01:26:41,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
as already flushing 2024-11-17T01:26:41,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:41,597 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/29f67f76eab34cc494388e8d733fac83, entries=150, sequenceid=49, filesize=11.7 K 2024-11-17T01:26:41,599 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/e32d915eb34d45d2ac6ddc9e4132d5f6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e32d915eb34d45d2ac6ddc9e4132d5f6 2024-11-17T01:26:41,612 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e32d915eb34d45d2ac6ddc9e4132d5f6, entries=150, sequenceid=49, filesize=11.7 K 2024-11-17T01:26:41,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/0fcffae1390646ddadd60cd562f96c84 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/0fcffae1390646ddadd60cd562f96c84 2024-11-17T01:26:41,626 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/0fcffae1390646ddadd60cd562f96c84, entries=150, sequenceid=49, filesize=11.7 K 2024-11-17T01:26:41,631 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=13.42 KB/13740 for 733401ed1ccb71c159e3f227c30cedc7 in 183ms, sequenceid=49, compaction requested=true 2024-11-17T01:26:41,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:41,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
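Annotation: this second flush finishes with "compaction requested=true": each of the three stores now holds enough HFiles to cross the minimum-files threshold, so the region server queues compaction work (the CompactSplit entries further down). The relevant knobs, assuming the standard HBase 2.x configuration keys; the values shown are the usual defaults, listed for illustration only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTriggerConfig {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of store files before a minor compaction may be selected.
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Upper bound on the number of files compacted in one pass.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Writes are delayed once a store accumulates this many files; this is the
            // "16 blocking" figure in the compaction-selection entries below.
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            return conf;
        }
    }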
2024-11-17T01:26:41,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-17T01:26:41,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-17T01:26:41,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-17T01:26:41,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8160 sec 2024-11-17T01:26:41,640 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.8300 sec 2024-11-17T01:26:41,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:41,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:26:41,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:41,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:41,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:41,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:41,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:41,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:41,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/f7daf7d338ad4d19b9b1b30912a852de is 50, key is test_row_0/A:col10/1731806801696/Put/seqid=0 2024-11-17T01:26:41,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741848_1024 (size=12001) 2024-11-17T01:26:41,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/f7daf7d338ad4d19b9b1b30912a852de 2024-11-17T01:26:41,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/49919e7314cb4279aae6a2cb09e9b9b0 is 50, key is test_row_0/B:col10/1731806801696/Put/seqid=0 2024-11-17T01:26:41,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741849_1025 
(size=12001) 2024-11-17T01:26:41,784 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/49919e7314cb4279aae6a2cb09e9b9b0 2024-11-17T01:26:41,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/f86ce407dd1e4b479c568e536d25648a is 50, key is test_row_0/C:col10/1731806801696/Put/seqid=0 2024-11-17T01:26:41,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741850_1026 (size=12001) 2024-11-17T01:26:41,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-17T01:26:41,923 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-17T01:26:41,926 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:26:41,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-17T01:26:41,929 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:26:41,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-17T01:26:41,931 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:26:41,931 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:26:42,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:42,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806862001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-17T01:26:42,084 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:42,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:42,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:42,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:42,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806862111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:42,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53600 deadline: 1731806862179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,184 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:42,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806862181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:42,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806862182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,185 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:42,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53582 deadline: 1731806862183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-17T01:26:42,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/f86ce407dd1e4b479c568e536d25648a 2024-11-17T01:26:42,240 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:42,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:42,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,243 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,251 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/f7daf7d338ad4d19b9b1b30912a852de as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f7daf7d338ad4d19b9b1b30912a852de 2024-11-17T01:26:42,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f7daf7d338ad4d19b9b1b30912a852de, entries=150, sequenceid=60, filesize=11.7 K 2024-11-17T01:26:42,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/49919e7314cb4279aae6a2cb09e9b9b0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/49919e7314cb4279aae6a2cb09e9b9b0 2024-11-17T01:26:42,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/49919e7314cb4279aae6a2cb09e9b9b0, entries=150, sequenceid=60, filesize=11.7 K 2024-11-17T01:26:42,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/f86ce407dd1e4b479c568e536d25648a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f86ce407dd1e4b479c568e536d25648a 2024-11-17T01:26:42,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f86ce407dd1e4b479c568e536d25648a, entries=150, sequenceid=60, filesize=11.7 K 2024-11-17T01:26:42,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 733401ed1ccb71c159e3f227c30cedc7 in 585ms, sequenceid=60, compaction requested=true 2024-11-17T01:26:42,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:42,298 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:42,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:42,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:42,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:42,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:42,299 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:42,299 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:26:42,299 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:26:42,303 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:26:42,305 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:42,305 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:42,306 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/aa961b8b10ae499dad6bdca5860115bf, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/409f91929421410aa840d931f08f9c3a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e32d915eb34d45d2ac6ddc9e4132d5f6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/49919e7314cb4279aae6a2cb09e9b9b0] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=46.9 K 2024-11-17T01:26:42,308 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting aa961b8b10ae499dad6bdca5860115bf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731806799841 2024-11-17T01:26:42,308 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 57364 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:26:42,308 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:42,308 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
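Annotation: the ExploringCompactionPolicy entries show both stores picking all four eligible files as a minor compaction (48004 bytes for B, 57364 bytes for A). At its core the selection is ratio-based: a file may join a candidate set only if it is not much larger than the combined size of the files it would be compacted with. A deliberately simplified sketch of that ratio test; the real policy also explores multiple permutations and enforces min/max file counts and size limits:

    import java.util.List;

    public class RatioCheck {
        // Simplified form of the ratio criterion used by size-based compaction policies:
        // every candidate file must be no larger than (sum of the other files) * ratio.
        static boolean satisfiesRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > (total - size) * ratio) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Roughly the four evenly sized B-store files selected above (bytes).
            List<Long> candidate = List.of(12001L, 12001L, 12001L, 12001L);
            System.out.println(satisfiesRatio(candidate, 1.2)); // true
        }
    }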
2024-11-17T01:26:42,309 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a89265a5b4904c84b4b9f85bfa7e4a6f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/da8dcf6fb30c4b2097072b9fda58cc05, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/29f67f76eab34cc494388e8d733fac83, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f7daf7d338ad4d19b9b1b30912a852de] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=56.0 K 2024-11-17T01:26:42,309 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 409f91929421410aa840d931f08f9c3a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1731806799997 2024-11-17T01:26:42,309 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting a89265a5b4904c84b4b9f85bfa7e4a6f, keycount=300, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731806799841 2024-11-17T01:26:42,310 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e32d915eb34d45d2ac6ddc9e4132d5f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1731806801210 2024-11-17T01:26:42,310 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting da8dcf6fb30c4b2097072b9fda58cc05, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1731806799997 2024-11-17T01:26:42,313 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 49919e7314cb4279aae6a2cb09e9b9b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731806801690 2024-11-17T01:26:42,313 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29f67f76eab34cc494388e8d733fac83, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1731806801210 2024-11-17T01:26:42,314 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting f7daf7d338ad4d19b9b1b30912a852de, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731806801690 2024-11-17T01:26:42,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:42,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-17T01:26:42,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:42,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-17T01:26:42,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:42,353 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:42,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:42,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:42,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/0f531c77b63049e0b8cf67a8879bef99 is 50, key is test_row_0/A:col10/1731806801998/Put/seqid=0 2024-11-17T01:26:42,385 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#12 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:42,386 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/77115b882001454387d7e8c5af5b3cc6 is 50, key is test_row_0/B:col10/1731806801696/Put/seqid=0 2024-11-17T01:26:42,389 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#13 average throughput is 0.34 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:42,391 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/4117b278a9b74fbcb6876ca0711d11f0 is 50, key is test_row_0/A:col10/1731806801696/Put/seqid=0 2024-11-17T01:26:42,396 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:42,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:42,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:42,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741852_1028 (size=12139) 2024-11-17T01:26:42,428 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/77115b882001454387d7e8c5af5b3cc6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/77115b882001454387d7e8c5af5b3cc6 2024-11-17T01:26:42,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741851_1027 (size=12001) 2024-11-17T01:26:42,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/0f531c77b63049e0b8cf67a8879bef99 2024-11-17T01:26:42,452 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into 77115b882001454387d7e8c5af5b3cc6(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:42,452 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:42,452 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=12, startTime=1731806802298; duration=0sec 2024-11-17T01:26:42,453 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:42,453 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:42,453 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:26:42,456 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:26:42,456 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:42,456 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,456 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/ffb623cc5cd84779a672ec98ce9d9bad, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2bc3879a31574ba787bd9962a4b955a5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/0fcffae1390646ddadd60cd562f96c84, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f86ce407dd1e4b479c568e536d25648a] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=46.9 K 2024-11-17T01:26:42,457 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ffb623cc5cd84779a672ec98ce9d9bad, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731806799841 2024-11-17T01:26:42,458 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bc3879a31574ba787bd9962a4b955a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1731806799997 2024-11-17T01:26:42,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741853_1029 (size=12139) 2024-11-17T01:26:42,462 DEBUG 
[RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fcffae1390646ddadd60cd562f96c84, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1731806801210 2024-11-17T01:26:42,463 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f86ce407dd1e4b479c568e536d25648a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731806801690 2024-11-17T01:26:42,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/c9b111f31b5b4f5f8b0fa931035970e9 is 50, key is test_row_0/B:col10/1731806801998/Put/seqid=0 2024-11-17T01:26:42,481 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:42,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806862478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741854_1030 (size=12001) 2024-11-17T01:26:42,492 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/c9b111f31b5b4f5f8b0fa931035970e9 2024-11-17T01:26:42,493 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#16 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:42,494 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4952551225c54902b508f5562d02e019 is 50, key is test_row_0/C:col10/1731806801696/Put/seqid=0 2024-11-17T01:26:42,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741855_1031 (size=12139) 2024-11-17T01:26:42,521 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a7a6bb67af65481ab82f028e39c4a114 is 50, key is test_row_0/C:col10/1731806801998/Put/seqid=0 2024-11-17T01:26:42,529 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4952551225c54902b508f5562d02e019 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4952551225c54902b508f5562d02e019 2024-11-17T01:26:42,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-17T01:26:42,542 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into 4952551225c54902b508f5562d02e019(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:42,542 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:42,542 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=12, startTime=1731806802299; duration=0sec 2024-11-17T01:26:42,542 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:42,542 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:42,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741856_1032 (size=12001) 2024-11-17T01:26:42,552 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,552 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,553 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:42,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:42,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806862584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,707 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,708 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:42,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:42,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:42,708 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,788 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:42,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806862788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,862 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:42,863 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:42,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:42,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:42,864 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:42,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:42,879 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/4117b278a9b74fbcb6876ca0711d11f0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4117b278a9b74fbcb6876ca0711d11f0 2024-11-17T01:26:42,895 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 4117b278a9b74fbcb6876ca0711d11f0(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:42,896 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:42,896 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=12, startTime=1731806802298; duration=0sec 2024-11-17T01:26:42,897 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:42,897 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:42,950 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a7a6bb67af65481ab82f028e39c4a114 2024-11-17T01:26:42,966 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/0f531c77b63049e0b8cf67a8879bef99 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f531c77b63049e0b8cf67a8879bef99 2024-11-17T01:26:42,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f531c77b63049e0b8cf67a8879bef99, entries=150, sequenceid=86, filesize=11.7 K 2024-11-17T01:26:42,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/c9b111f31b5b4f5f8b0fa931035970e9 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c9b111f31b5b4f5f8b0fa931035970e9 2024-11-17T01:26:43,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c9b111f31b5b4f5f8b0fa931035970e9, entries=150, sequenceid=86, filesize=11.7 K 2024-11-17T01:26:43,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a7a6bb67af65481ab82f028e39c4a114 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a7a6bb67af65481ab82f028e39c4a114 2024-11-17T01:26:43,019 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:43,019 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:43,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:43,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:43,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,023 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a7a6bb67af65481ab82f028e39c4a114, entries=150, sequenceid=86, filesize=11.7 K 2024-11-17T01:26:43,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 733401ed1ccb71c159e3f227c30cedc7 in 673ms, sequenceid=86, compaction requested=false 2024-11-17T01:26:43,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:43,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-17T01:26:43,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:43,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:26:43,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:43,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:43,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:43,094 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:43,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:43,095 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:43,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/72375699d99a40a484fa23f79d7eb28b is 50, key is test_row_0/A:col10/1731806803091/Put/seqid=0 2024-11-17T01:26:43,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741857_1033 (size=14341) 2024-11-17T01:26:43,141 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/72375699d99a40a484fa23f79d7eb28b 2024-11-17T01:26:43,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4c46fb48cd514f039fdead697463ad77 is 
50, key is test_row_0/B:col10/1731806803091/Put/seqid=0 2024-11-17T01:26:43,175 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:43,175 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:43,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:43,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741858_1034 (size=12001) 2024-11-17T01:26:43,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4c46fb48cd514f039fdead697463ad77 2024-11-17T01:26:43,212 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:43,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806863209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:43,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/7eeeb38f29f24e35b8aadc61aacbbeff is 50, key is test_row_0/C:col10/1731806803091/Put/seqid=0 2024-11-17T01:26:43,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741859_1035 (size=12001) 2024-11-17T01:26:43,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/7eeeb38f29f24e35b8aadc61aacbbeff 2024-11-17T01:26:43,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/72375699d99a40a484fa23f79d7eb28b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/72375699d99a40a484fa23f79d7eb28b 2024-11-17T01:26:43,259 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/72375699d99a40a484fa23f79d7eb28b, entries=200, sequenceid=100, filesize=14.0 K 2024-11-17T01:26:43,261 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4c46fb48cd514f039fdead697463ad77 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4c46fb48cd514f039fdead697463ad77 2024-11-17T01:26:43,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4c46fb48cd514f039fdead697463ad77, entries=150, sequenceid=100, filesize=11.7 K 2024-11-17T01:26:43,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/7eeeb38f29f24e35b8aadc61aacbbeff as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7eeeb38f29f24e35b8aadc61aacbbeff 2024-11-17T01:26:43,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7eeeb38f29f24e35b8aadc61aacbbeff, entries=150, sequenceid=100, filesize=11.7 K 2024-11-17T01:26:43,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 733401ed1ccb71c159e3f227c30cedc7 in 203ms, sequenceid=100, compaction requested=true 2024-11-17T01:26:43,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:43,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:43,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:43,296 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:43,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:43,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:43,297 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:43,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:43,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:43,298 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:43,299 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:43,299 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,299 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:43,299 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:43,299 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,299 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/77115b882001454387d7e8c5af5b3cc6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c9b111f31b5b4f5f8b0fa931035970e9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4c46fb48cd514f039fdead697463ad77] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=35.3 K 2024-11-17T01:26:43,299 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4117b278a9b74fbcb6876ca0711d11f0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f531c77b63049e0b8cf67a8879bef99, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/72375699d99a40a484fa23f79d7eb28b] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=37.6 K 2024-11-17T01:26:43,300 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4117b278a9b74fbcb6876ca0711d11f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731806801690 2024-11-17T01:26:43,300 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 
77115b882001454387d7e8c5af5b3cc6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731806801690 2024-11-17T01:26:43,301 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c9b111f31b5b4f5f8b0fa931035970e9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731806801976 2024-11-17T01:26:43,301 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f531c77b63049e0b8cf67a8879bef99, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731806801976 2024-11-17T01:26:43,302 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c46fb48cd514f039fdead697463ad77, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731806802400 2024-11-17T01:26:43,302 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 72375699d99a40a484fa23f79d7eb28b, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731806802400 2024-11-17T01:26:43,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-17T01:26:43,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:43,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:43,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:43,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:43,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:43,320 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:43,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:43,329 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:43,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:43,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
as already flushing 2024-11-17T01:26:43,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,334 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/f3d876e5876d4da2bc916207ec739e0c is 50, key is test_row_0/A:col10/1731806803316/Put/seqid=0 2024-11-17T01:26:43,346 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#22 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:43,347 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/359d7bb6ab2c4d05bbc212172413ab4b is 50, key is test_row_0/B:col10/1731806803091/Put/seqid=0 2024-11-17T01:26:43,352 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#23 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:43,353 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/49e1e069455349cd9a555ae5c0408d4a is 50, key is test_row_0/A:col10/1731806803091/Put/seqid=0 2024-11-17T01:26:43,373 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:43,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806863367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:43,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741861_1037 (size=12241) 2024-11-17T01:26:43,396 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/359d7bb6ab2c4d05bbc212172413ab4b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/359d7bb6ab2c4d05bbc212172413ab4b 2024-11-17T01:26:43,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741860_1036 (size=14341) 2024-11-17T01:26:43,398 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/f3d876e5876d4da2bc916207ec739e0c 
2024-11-17T01:26:43,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741862_1038 (size=12241) 2024-11-17T01:26:43,417 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into 359d7bb6ab2c4d05bbc212172413ab4b(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:43,417 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:43,417 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806803296; duration=0sec 2024-11-17T01:26:43,418 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:43,418 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:43,418 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:43,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/bd6a333c92c142f496e207d9a4dc7c67 is 50, key is test_row_0/B:col10/1731806803316/Put/seqid=0 2024-11-17T01:26:43,422 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:43,423 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:43,423 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:43,423 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4952551225c54902b508f5562d02e019, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a7a6bb67af65481ab82f028e39c4a114, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7eeeb38f29f24e35b8aadc61aacbbeff] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=35.3 K 2024-11-17T01:26:43,425 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4952551225c54902b508f5562d02e019, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=60, earliestPutTs=1731806801690 2024-11-17T01:26:43,427 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/49e1e069455349cd9a555ae5c0408d4a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/49e1e069455349cd9a555ae5c0408d4a 2024-11-17T01:26:43,427 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a7a6bb67af65481ab82f028e39c4a114, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1731806801976 2024-11-17T01:26:43,429 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 7eeeb38f29f24e35b8aadc61aacbbeff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731806802400 2024-11-17T01:26:43,443 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 49e1e069455349cd9a555ae5c0408d4a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:43,443 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:43,443 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806803296; duration=0sec 2024-11-17T01:26:43,444 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:43,444 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:43,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741863_1039 (size=12001) 2024-11-17T01:26:43,457 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/bd6a333c92c142f496e207d9a4dc7c67 2024-11-17T01:26:43,458 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#25 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:43,459 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/99ba2b644a6749259aebfbac9d79e145 is 50, key is test_row_0/C:col10/1731806803091/Put/seqid=0 2024-11-17T01:26:43,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806863476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:43,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/cd55ccae16bf4a12b8fd4fd8ddd18591 is 50, key is test_row_0/C:col10/1731806803316/Put/seqid=0 2024-11-17T01:26:43,485 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:43,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:43,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:43,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:43,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741864_1040 (size=12241) 2024-11-17T01:26:43,516 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/99ba2b644a6749259aebfbac9d79e145 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/99ba2b644a6749259aebfbac9d79e145 2024-11-17T01:26:43,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741865_1041 (size=12001) 2024-11-17T01:26:43,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/cd55ccae16bf4a12b8fd4fd8ddd18591 2024-11-17T01:26:43,535 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into 99ba2b644a6749259aebfbac9d79e145(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:43,535 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:43,535 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806803297; duration=0sec 2024-11-17T01:26:43,539 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:43,539 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:43,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/f3d876e5876d4da2bc916207ec739e0c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f3d876e5876d4da2bc916207ec739e0c 2024-11-17T01:26:43,551 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f3d876e5876d4da2bc916207ec739e0c, entries=200, sequenceid=126, filesize=14.0 K 2024-11-17T01:26:43,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/bd6a333c92c142f496e207d9a4dc7c67 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/bd6a333c92c142f496e207d9a4dc7c67 2024-11-17T01:26:43,565 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/bd6a333c92c142f496e207d9a4dc7c67, entries=150, sequenceid=126, filesize=11.7 K 2024-11-17T01:26:43,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/cd55ccae16bf4a12b8fd4fd8ddd18591 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/cd55ccae16bf4a12b8fd4fd8ddd18591 2024-11-17T01:26:43,580 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/cd55ccae16bf4a12b8fd4fd8ddd18591, entries=150, sequenceid=126, filesize=11.7 K 2024-11-17T01:26:43,582 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 733401ed1ccb71c159e3f227c30cedc7 in 262ms, sequenceid=126, 
compaction requested=false 2024-11-17T01:26:43,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:43,640 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:43,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-17T01:26:43,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:43,641 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-17T01:26:43,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:43,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:43,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:43,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:43,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:43,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:43,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/4be2eafebad14c00b35e2541fdf2fa9c is 50, key is test_row_0/A:col10/1731806803359/Put/seqid=0 2024-11-17T01:26:43,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741866_1042 (size=9757) 2024-11-17T01:26:43,664 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/4be2eafebad14c00b35e2541fdf2fa9c 2024-11-17T01:26:43,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/47460f09425c46c0baa804855076f8ed is 50, key is test_row_0/B:col10/1731806803359/Put/seqid=0 2024-11-17T01:26:43,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:43,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:43,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741867_1043 (size=9757) 2024-11-17T01:26:43,794 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:43,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806863791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:43,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:43,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806863896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:44,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-17T01:26:44,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:44,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806864101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:44,127 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/47460f09425c46c0baa804855076f8ed 2024-11-17T01:26:44,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4e3499440d2b47028aa189c3ce1bd6d5 is 50, key is test_row_0/C:col10/1731806803359/Put/seqid=0 2024-11-17T01:26:44,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741868_1044 (size=9757) 2024-11-17T01:26:44,159 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=139 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4e3499440d2b47028aa189c3ce1bd6d5 2024-11-17T01:26:44,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/4be2eafebad14c00b35e2541fdf2fa9c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4be2eafebad14c00b35e2541fdf2fa9c 2024-11-17T01:26:44,184 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4be2eafebad14c00b35e2541fdf2fa9c, entries=100, sequenceid=139, filesize=9.5 K 2024-11-17T01:26:44,187 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/47460f09425c46c0baa804855076f8ed as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/47460f09425c46c0baa804855076f8ed 2024-11-17T01:26:44,196 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/47460f09425c46c0baa804855076f8ed, entries=100, sequenceid=139, filesize=9.5 K 2024-11-17T01:26:44,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4e3499440d2b47028aa189c3ce1bd6d5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e3499440d2b47028aa189c3ce1bd6d5 2024-11-17T01:26:44,202 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:44,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806864194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:44,203 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:44,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53600 deadline: 1731806864194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:44,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:44,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806864195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:44,204 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:44,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:44,204 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4207 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:44,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53582 deadline: 1731806864202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:44,204 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4198 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, 
regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) 
at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:44,205 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4204 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at 
org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:44,211 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e3499440d2b47028aa189c3ce1bd6d5, entries=100, sequenceid=139, filesize=9.5 K 2024-11-17T01:26:44,213 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 733401ed1ccb71c159e3f227c30cedc7 in 571ms, sequenceid=139, compaction requested=true 2024-11-17T01:26:44,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:44,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:44,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-17T01:26:44,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-17T01:26:44,219 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-17T01:26:44,220 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2840 sec 2024-11-17T01:26:44,225 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 2.2960 sec 2024-11-17T01:26:44,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:44,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-17T01:26:44,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:44,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:44,408 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:44,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:44,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:44,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:44,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/fa5666865edf41af84c35c43a238a165 is 50, key is 
test_row_0/A:col10/1731806804406/Put/seqid=0 2024-11-17T01:26:44,432 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:44,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806864429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:44,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741869_1045 (size=12151) 2024-11-17T01:26:44,446 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/fa5666865edf41af84c35c43a238a165 2024-11-17T01:26:44,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/a94bd5312ddf423b9a69934c9cb73d24 is 50, key is test_row_0/B:col10/1731806804406/Put/seqid=0 2024-11-17T01:26:44,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741870_1046 (size=12151) 2024-11-17T01:26:44,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/a94bd5312ddf423b9a69934c9cb73d24 2024-11-17T01:26:44,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/961c322e72f645f2b510d76de0ada9c4 is 50, key is test_row_0/C:col10/1731806804406/Put/seqid=0 2024-11-17T01:26:44,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741871_1047 (size=12151) 2024-11-17T01:26:44,527 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/961c322e72f645f2b510d76de0ada9c4 2024-11-17T01:26:44,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:44,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806864534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:44,542 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/fa5666865edf41af84c35c43a238a165 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/fa5666865edf41af84c35c43a238a165 2024-11-17T01:26:44,554 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/fa5666865edf41af84c35c43a238a165, entries=150, sequenceid=166, filesize=11.9 K 2024-11-17T01:26:44,556 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/a94bd5312ddf423b9a69934c9cb73d24 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a94bd5312ddf423b9a69934c9cb73d24 2024-11-17T01:26:44,566 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a94bd5312ddf423b9a69934c9cb73d24, entries=150, sequenceid=166, filesize=11.9 K 2024-11-17T01:26:44,568 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/961c322e72f645f2b510d76de0ada9c4 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/961c322e72f645f2b510d76de0ada9c4 2024-11-17T01:26:44,576 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/961c322e72f645f2b510d76de0ada9c4, entries=150, sequenceid=166, filesize=11.9 K 2024-11-17T01:26:44,579 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 733401ed1ccb71c159e3f227c30cedc7 in 171ms, sequenceid=166, compaction requested=true 2024-11-17T01:26:44,579 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:44,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:44,579 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:26:44,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:44,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:44,579 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:26:44,579 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:44,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:44,580 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; 
Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:44,582 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48490 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:26:44,582 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:44,582 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:44,583 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/49e1e069455349cd9a555ae5c0408d4a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f3d876e5876d4da2bc916207ec739e0c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4be2eafebad14c00b35e2541fdf2fa9c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/fa5666865edf41af84c35c43a238a165] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=47.4 K 2024-11-17T01:26:44,584 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46150 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:26:44,584 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:44,584 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:44,584 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/359d7bb6ab2c4d05bbc212172413ab4b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/bd6a333c92c142f496e207d9a4dc7c67, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/47460f09425c46c0baa804855076f8ed, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a94bd5312ddf423b9a69934c9cb73d24] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=45.1 K 2024-11-17T01:26:44,585 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 359d7bb6ab2c4d05bbc212172413ab4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731806802400 2024-11-17T01:26:44,585 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49e1e069455349cd9a555ae5c0408d4a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731806802400 2024-11-17T01:26:44,586 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3d876e5876d4da2bc916207ec739e0c, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1731806803192 2024-11-17T01:26:44,587 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting bd6a333c92c142f496e207d9a4dc7c67, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1731806803200 2024-11-17T01:26:44,588 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 47460f09425c46c0baa804855076f8ed, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1731806803359 2024-11-17T01:26:44,588 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4be2eafebad14c00b35e2541fdf2fa9c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=139, earliestPutTs=1731806803359 2024-11-17T01:26:44,589 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a94bd5312ddf423b9a69934c9cb73d24, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731806803769 2024-11-17T01:26:44,590 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa5666865edf41af84c35c43a238a165, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731806803769 2024-11-17T01:26:44,608 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#33 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:44,609 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/c2eb10be350247d292edce7c7c6c3d44 is 50, key is test_row_0/B:col10/1731806804406/Put/seqid=0 2024-11-17T01:26:44,616 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#34 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:44,616 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/d9f0e9ad46c544e2bf5e2b5dbdcca9fb is 50, key is test_row_0/A:col10/1731806804406/Put/seqid=0 2024-11-17T01:26:44,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741873_1049 (size=12527) 2024-11-17T01:26:44,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741872_1048 (size=12527) 2024-11-17T01:26:44,667 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/c2eb10be350247d292edce7c7c6c3d44 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c2eb10be350247d292edce7c7c6c3d44 2024-11-17T01:26:44,680 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into c2eb10be350247d292edce7c7c6c3d44(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:44,680 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:44,680 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=12, startTime=1731806804579; duration=0sec 2024-11-17T01:26:44,680 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:44,681 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:44,681 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:26:44,684 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46150 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:26:44,684 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:44,684 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:44,684 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/99ba2b644a6749259aebfbac9d79e145, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/cd55ccae16bf4a12b8fd4fd8ddd18591, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e3499440d2b47028aa189c3ce1bd6d5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/961c322e72f645f2b510d76de0ada9c4] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=45.1 K 2024-11-17T01:26:44,685 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 99ba2b644a6749259aebfbac9d79e145, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1731806802400 2024-11-17T01:26:44,686 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting cd55ccae16bf4a12b8fd4fd8ddd18591, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1731806803200 2024-11-17T01:26:44,687 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e3499440d2b47028aa189c3ce1bd6d5, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, 
compression=NONE, seqNum=139, earliestPutTs=1731806803359 2024-11-17T01:26:44,688 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 961c322e72f645f2b510d76de0ada9c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731806803769 2024-11-17T01:26:44,717 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#35 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:44,718 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/bf1244a792024784892c7f9b3392591b is 50, key is test_row_0/C:col10/1731806804406/Put/seqid=0 2024-11-17T01:26:44,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741874_1050 (size=12527) 2024-11-17T01:26:44,747 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/bf1244a792024784892c7f9b3392591b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/bf1244a792024784892c7f9b3392591b 2024-11-17T01:26:44,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:26:44,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:44,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:44,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:44,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:44,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:44,752 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:44,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:44,761 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/28b84e8d41104576b0e3cca38fc6397f is 50, key is test_row_0/A:col10/1731806804417/Put/seqid=0 2024-11-17T01:26:44,762 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into bf1244a792024784892c7f9b3392591b(size=12.2 
K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:44,762 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:44,762 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=12, startTime=1731806804580; duration=0sec 2024-11-17T01:26:44,762 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:44,763 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:44,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741875_1051 (size=12151) 2024-11-17T01:26:44,774 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/28b84e8d41104576b0e3cca38fc6397f 2024-11-17T01:26:44,791 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/46a2686437e44a1887064a82545b9b1c is 50, key is test_row_0/B:col10/1731806804417/Put/seqid=0 2024-11-17T01:26:44,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741876_1052 (size=12151) 2024-11-17T01:26:44,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/46a2686437e44a1887064a82545b9b1c 2024-11-17T01:26:44,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4e7a2decfae1429ea1c56b4a6cce5a76 is 50, key is test_row_0/C:col10/1731806804417/Put/seqid=0 2024-11-17T01:26:44,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741877_1053 (size=12151) 2024-11-17T01:26:44,840 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=178 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4e7a2decfae1429ea1c56b4a6cce5a76 2024-11-17T01:26:44,853 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/28b84e8d41104576b0e3cca38fc6397f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/28b84e8d41104576b0e3cca38fc6397f 2024-11-17T01:26:44,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/28b84e8d41104576b0e3cca38fc6397f, entries=150, sequenceid=178, filesize=11.9 K 2024-11-17T01:26:44,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/46a2686437e44a1887064a82545b9b1c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/46a2686437e44a1887064a82545b9b1c 2024-11-17T01:26:44,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806864878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:44,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/46a2686437e44a1887064a82545b9b1c, entries=150, sequenceid=178, filesize=11.9 K 2024-11-17T01:26:44,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4e7a2decfae1429ea1c56b4a6cce5a76 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e7a2decfae1429ea1c56b4a6cce5a76 2024-11-17T01:26:44,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e7a2decfae1429ea1c56b4a6cce5a76, entries=150, sequenceid=178, filesize=11.9 K 2024-11-17T01:26:44,902 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 733401ed1ccb71c159e3f227c30cedc7 in 151ms, sequenceid=178, compaction requested=false 2024-11-17T01:26:44,902 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:44,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:44,986 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-17T01:26:44,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:44,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:44,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:44,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:44,986 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING 
TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:44,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:44,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/104396edf912489a8036fa7332f2da01 is 50, key is test_row_0/A:col10/1731806804984/Put/seqid=0 2024-11-17T01:26:45,024 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:45,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806865020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:45,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741878_1054 (size=14541) 2024-11-17T01:26:45,026 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/104396edf912489a8036fa7332f2da01 2024-11-17T01:26:45,043 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4fdbfacbf0554334b59df1346781dbe3 is 50, key is test_row_0/B:col10/1731806804984/Put/seqid=0 2024-11-17T01:26:45,057 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/d9f0e9ad46c544e2bf5e2b5dbdcca9fb as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d9f0e9ad46c544e2bf5e2b5dbdcca9fb 2024-11-17T01:26:45,068 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into d9f0e9ad46c544e2bf5e2b5dbdcca9fb(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:45,068 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:45,068 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=12, startTime=1731806804579; duration=0sec 2024-11-17T01:26:45,068 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:45,068 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:45,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741879_1055 (size=12151) 2024-11-17T01:26:45,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4fdbfacbf0554334b59df1346781dbe3 2024-11-17T01:26:45,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/d727a4c2d66c445c9123ed9893e54e48 is 50, key is test_row_0/C:col10/1731806804984/Put/seqid=0 2024-11-17T01:26:45,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741880_1056 (size=12151) 2024-11-17T01:26:45,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/d727a4c2d66c445c9123ed9893e54e48 2024-11-17T01:26:45,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/104396edf912489a8036fa7332f2da01 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/104396edf912489a8036fa7332f2da01 2024-11-17T01:26:45,118 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/104396edf912489a8036fa7332f2da01, entries=200, sequenceid=205, filesize=14.2 K 2024-11-17T01:26:45,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4fdbfacbf0554334b59df1346781dbe3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4fdbfacbf0554334b59df1346781dbe3 2024-11-17T01:26:45,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:45,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806865128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:45,131 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4fdbfacbf0554334b59df1346781dbe3, entries=150, sequenceid=205, filesize=11.9 K 2024-11-17T01:26:45,133 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/d727a4c2d66c445c9123ed9893e54e48 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d727a4c2d66c445c9123ed9893e54e48 2024-11-17T01:26:45,147 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d727a4c2d66c445c9123ed9893e54e48, entries=150, sequenceid=205, filesize=11.9 K 2024-11-17T01:26:45,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 733401ed1ccb71c159e3f227c30cedc7 in 163ms, sequenceid=205, compaction requested=true 2024-11-17T01:26:45,148 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:45,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:45,149 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:45,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:45,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:45,149 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:45,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:45,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:45,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:45,150 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39219 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:45,150 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:45,150 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:45,151 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:45,151 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:45,151 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:45,151 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c2eb10be350247d292edce7c7c6c3d44, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/46a2686437e44a1887064a82545b9b1c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4fdbfacbf0554334b59df1346781dbe3] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.0 K 2024-11-17T01:26:45,151 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d9f0e9ad46c544e2bf5e2b5dbdcca9fb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/28b84e8d41104576b0e3cca38fc6397f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/104396edf912489a8036fa7332f2da01] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=38.3 K 2024-11-17T01:26:45,152 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c2eb10be350247d292edce7c7c6c3d44, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731806803769 2024-11-17T01:26:45,152 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9f0e9ad46c544e2bf5e2b5dbdcca9fb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731806803769 2024-11-17T01:26:45,153 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28b84e8d41104576b0e3cca38fc6397f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1731806804417 2024-11-17T01:26:45,154 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 46a2686437e44a1887064a82545b9b1c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1731806804417 2024-11-17T01:26:45,154 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 104396edf912489a8036fa7332f2da01, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731806804862 2024-11-17T01:26:45,155 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fdbfacbf0554334b59df1346781dbe3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731806804862 
2024-11-17T01:26:45,176 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#42 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:45,177 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/6f31e57a3b09404980f6d98f64589e7e is 50, key is test_row_0/A:col10/1731806804984/Put/seqid=0 2024-11-17T01:26:45,185 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#43 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:45,186 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/9425521c046f4d8db6ca81c170a6b5b5 is 50, key is test_row_0/B:col10/1731806804984/Put/seqid=0 2024-11-17T01:26:45,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741881_1057 (size=12629) 2024-11-17T01:26:45,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741882_1058 (size=12629) 2024-11-17T01:26:45,223 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/9425521c046f4d8db6ca81c170a6b5b5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9425521c046f4d8db6ca81c170a6b5b5 2024-11-17T01:26:45,233 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into 9425521c046f4d8db6ca81c170a6b5b5(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:45,234 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:45,234 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806805149; duration=0sec 2024-11-17T01:26:45,234 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:45,234 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:45,235 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:45,236 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:45,236 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:45,237 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:45,237 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/bf1244a792024784892c7f9b3392591b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e7a2decfae1429ea1c56b4a6cce5a76, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d727a4c2d66c445c9123ed9893e54e48] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.0 K 2024-11-17T01:26:45,238 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting bf1244a792024784892c7f9b3392591b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731806803769 2024-11-17T01:26:45,238 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e7a2decfae1429ea1c56b4a6cce5a76, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=178, earliestPutTs=1731806804417 2024-11-17T01:26:45,239 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting d727a4c2d66c445c9123ed9893e54e48, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731806804862 2024-11-17T01:26:45,255 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
733401ed1ccb71c159e3f227c30cedc7#C#compaction#44 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:45,256 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/f20b44a2b721400a8da19519329351b4 is 50, key is test_row_0/C:col10/1731806804984/Put/seqid=0 2024-11-17T01:26:45,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741883_1059 (size=12629) 2024-11-17T01:26:45,279 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/f20b44a2b721400a8da19519329351b4 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f20b44a2b721400a8da19519329351b4 2024-11-17T01:26:45,290 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into f20b44a2b721400a8da19519329351b4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:45,290 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:45,290 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806805149; duration=0sec 2024-11-17T01:26:45,290 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:45,291 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:45,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:26:45,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:45,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:45,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:45,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:45,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:45,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, 
new segment=null 2024-11-17T01:26:45,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:45,342 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/89b2a0687c4f404da53e51cb51aeb751 is 50, key is test_row_0/A:col10/1731806805019/Put/seqid=0 2024-11-17T01:26:45,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741884_1060 (size=12151) 2024-11-17T01:26:45,371 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/89b2a0687c4f404da53e51cb51aeb751 2024-11-17T01:26:45,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/cc86d93b26784249bc91a7c6d2e32e6c is 50, key is test_row_0/B:col10/1731806805019/Put/seqid=0 2024-11-17T01:26:45,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741885_1061 (size=12151) 2024-11-17T01:26:45,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/cc86d93b26784249bc91a7c6d2e32e6c 2024-11-17T01:26:45,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/fc14c90b5d67463888d6cf5b40fc263e is 50, key is test_row_0/C:col10/1731806805019/Put/seqid=0 2024-11-17T01:26:45,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741886_1062 (size=12151) 2024-11-17T01:26:45,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=219 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/fc14c90b5d67463888d6cf5b40fc263e 2024-11-17T01:26:45,438 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:45,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806865437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:45,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/89b2a0687c4f404da53e51cb51aeb751 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89b2a0687c4f404da53e51cb51aeb751 2024-11-17T01:26:45,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89b2a0687c4f404da53e51cb51aeb751, entries=150, sequenceid=219, filesize=11.9 K 2024-11-17T01:26:45,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/cc86d93b26784249bc91a7c6d2e32e6c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/cc86d93b26784249bc91a7c6d2e32e6c 2024-11-17T01:26:45,460 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/cc86d93b26784249bc91a7c6d2e32e6c, entries=150, sequenceid=219, filesize=11.9 K 2024-11-17T01:26:45,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/fc14c90b5d67463888d6cf5b40fc263e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fc14c90b5d67463888d6cf5b40fc263e 2024-11-17T01:26:45,472 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fc14c90b5d67463888d6cf5b40fc263e, entries=150, sequenceid=219, filesize=11.9 K 2024-11-17T01:26:45,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 733401ed1ccb71c159e3f227c30cedc7 in 140ms, sequenceid=219, compaction requested=false 2024-11-17T01:26:45,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:45,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:45,543 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-17T01:26:45,543 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:45,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:45,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:45,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:45,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:45,544 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:45,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/91e358e5e96e4894a5c4bc8176179008 is 50, key is test_row_0/A:col10/1731806805541/Put/seqid=0 2024-11-17T01:26:45,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741887_1063 (size=14541) 2024-11-17T01:26:45,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/91e358e5e96e4894a5c4bc8176179008 2024-11-17T01:26:45,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/57bc1d480ee44c2ca86550724923f399 is 50, key is test_row_0/B:col10/1731806805541/Put/seqid=0 2024-11-17T01:26:45,635 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/6f31e57a3b09404980f6d98f64589e7e as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/6f31e57a3b09404980f6d98f64589e7e 2024-11-17T01:26:45,639 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:45,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806865634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:45,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741888_1064 (size=12151) 2024-11-17T01:26:45,645 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 6f31e57a3b09404980f6d98f64589e7e(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:45,645 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:45,645 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806805148; duration=0sec 2024-11-17T01:26:45,645 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:45,645 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:45,742 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:45,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806865741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:45,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:45,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806865945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:46,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-17T01:26:46,038 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-17T01:26:46,042 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/57bc1d480ee44c2ca86550724923f399 2024-11-17T01:26:46,044 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:26:46,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-17T01:26:46,048 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:26:46,051 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:26:46,052 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:26:46,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-17T01:26:46,068 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/3af32d9280eb4af3adf8a4bef48cbebf is 50, key is 
test_row_0/C:col10/1731806805541/Put/seqid=0 2024-11-17T01:26:46,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741889_1065 (size=12151) 2024-11-17T01:26:46,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=245 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/3af32d9280eb4af3adf8a4bef48cbebf 2024-11-17T01:26:46,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/91e358e5e96e4894a5c4bc8176179008 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/91e358e5e96e4894a5c4bc8176179008 2024-11-17T01:26:46,108 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/91e358e5e96e4894a5c4bc8176179008, entries=200, sequenceid=245, filesize=14.2 K 2024-11-17T01:26:46,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/57bc1d480ee44c2ca86550724923f399 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/57bc1d480ee44c2ca86550724923f399 2024-11-17T01:26:46,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/57bc1d480ee44c2ca86550724923f399, entries=150, sequenceid=245, filesize=11.9 K 2024-11-17T01:26:46,125 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/3af32d9280eb4af3adf8a4bef48cbebf as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3af32d9280eb4af3adf8a4bef48cbebf 2024-11-17T01:26:46,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3af32d9280eb4af3adf8a4bef48cbebf, entries=150, sequenceid=245, filesize=11.9 K 2024-11-17T01:26:46,139 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 733401ed1ccb71c159e3f227c30cedc7 in 596ms, sequenceid=245, compaction requested=true 2024-11-17T01:26:46,139 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:46,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, 
priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:46,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:46,139 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:46,139 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:46,141 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39321 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:46,141 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:46,141 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:46,142 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/6f31e57a3b09404980f6d98f64589e7e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89b2a0687c4f404da53e51cb51aeb751, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/91e358e5e96e4894a5c4bc8176179008] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=38.4 K 2024-11-17T01:26:46,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:46,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:46,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:46,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:46,142 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:46,143 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:46,143 INFO 
[RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:46,143 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9425521c046f4d8db6ca81c170a6b5b5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/cc86d93b26784249bc91a7c6d2e32e6c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/57bc1d480ee44c2ca86550724923f399] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.1 K 2024-11-17T01:26:46,143 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f31e57a3b09404980f6d98f64589e7e, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731806804862 2024-11-17T01:26:46,144 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 9425521c046f4d8db6ca81c170a6b5b5, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731806804862 2024-11-17T01:26:46,144 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89b2a0687c4f404da53e51cb51aeb751, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1731806805011 2024-11-17T01:26:46,145 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting cc86d93b26784249bc91a7c6d2e32e6c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1731806805011 2024-11-17T01:26:46,145 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91e358e5e96e4894a5c4bc8176179008, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731806805406 2024-11-17T01:26:46,146 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 57bc1d480ee44c2ca86550724923f399, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731806805422 2024-11-17T01:26:46,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-17T01:26:46,160 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#51 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:46,161 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/1c5318a31bea418fa9e0deb7cbd6618a is 50, key is test_row_0/A:col10/1731806805541/Put/seqid=0 2024-11-17T01:26:46,168 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#52 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:46,169 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/e5d557bbe9af40c6be6c48706f299e67 is 50, key is test_row_0/B:col10/1731806805541/Put/seqid=0 2024-11-17T01:26:46,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741890_1066 (size=12731) 2024-11-17T01:26:46,201 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/1c5318a31bea418fa9e0deb7cbd6618a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/1c5318a31bea418fa9e0deb7cbd6618a 2024-11-17T01:26:46,206 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:46,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-17T01:26:46,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:46,207 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-17T01:26:46,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:46,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:46,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:46,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:46,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:46,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:46,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/0f5bdfcd40fa42fdb21fe863eee83781 is 50, key is test_row_0/A:col10/1731806805569/Put/seqid=0 2024-11-17T01:26:46,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741891_1067 (size=12731) 2024-11-17T01:26:46,226 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 1c5318a31bea418fa9e0deb7cbd6618a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:46,226 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:46,226 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806806139; duration=0sec 2024-11-17T01:26:46,226 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:46,226 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:46,227 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:46,230 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:46,230 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:46,230 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:46,230 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f20b44a2b721400a8da19519329351b4, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fc14c90b5d67463888d6cf5b40fc263e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3af32d9280eb4af3adf8a4bef48cbebf] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.1 K 2024-11-17T01:26:46,231 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting f20b44a2b721400a8da19519329351b4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731806804862 2024-11-17T01:26:46,231 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc14c90b5d67463888d6cf5b40fc263e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=219, earliestPutTs=1731806805011 2024-11-17T01:26:46,232 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3af32d9280eb4af3adf8a4bef48cbebf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731806805422 2024-11-17T01:26:46,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:40381 is added to blk_1073741892_1068 (size=12151) 2024-11-17T01:26:46,239 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/0f5bdfcd40fa42fdb21fe863eee83781 2024-11-17T01:26:46,247 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#54 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:46,248 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/d928c411332b43489c56e82efbc3bab0 is 50, key is test_row_0/C:col10/1731806805541/Put/seqid=0 2024-11-17T01:26:46,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:46,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:46,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/fc4010d7b06f487a8dcf8127cad93ecb is 50, key is test_row_0/B:col10/1731806805569/Put/seqid=0 2024-11-17T01:26:46,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741893_1069 (size=12731) 2024-11-17T01:26:46,292 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/d928c411332b43489c56e82efbc3bab0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d928c411332b43489c56e82efbc3bab0 2024-11-17T01:26:46,303 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into d928c411332b43489c56e82efbc3bab0(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:46,303 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:46,303 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806806142; duration=0sec 2024-11-17T01:26:46,304 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:46,304 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:46,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741894_1070 (size=12151) 2024-11-17T01:26:46,309 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/fc4010d7b06f487a8dcf8127cad93ecb 2024-11-17T01:26:46,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/eb77af869e1b43fb8e2cf719662c26b6 is 50, key is test_row_0/C:col10/1731806805569/Put/seqid=0 2024-11-17T01:26:46,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741895_1071 (size=12151) 2024-11-17T01:26:46,353 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:46,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 253 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806866350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:46,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-17T01:26:46,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 255 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806866454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:46,630 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/e5d557bbe9af40c6be6c48706f299e67 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e5d557bbe9af40c6be6c48706f299e67 2024-11-17T01:26:46,639 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into e5d557bbe9af40c6be6c48706f299e67(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:46,639 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:46,639 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806806139; duration=0sec 2024-11-17T01:26:46,639 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:46,639 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:46,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-17T01:26:46,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806866658, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:46,737 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=256 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/eb77af869e1b43fb8e2cf719662c26b6 2024-11-17T01:26:46,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/0f5bdfcd40fa42fdb21fe863eee83781 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f5bdfcd40fa42fdb21fe863eee83781 2024-11-17T01:26:46,751 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f5bdfcd40fa42fdb21fe863eee83781, entries=150, sequenceid=256, filesize=11.9 K 2024-11-17T01:26:46,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/fc4010d7b06f487a8dcf8127cad93ecb as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/fc4010d7b06f487a8dcf8127cad93ecb 2024-11-17T01:26:46,759 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/fc4010d7b06f487a8dcf8127cad93ecb, entries=150, sequenceid=256, filesize=11.9 K 2024-11-17T01:26:46,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/eb77af869e1b43fb8e2cf719662c26b6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/eb77af869e1b43fb8e2cf719662c26b6 2024-11-17T01:26:46,769 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/eb77af869e1b43fb8e2cf719662c26b6, entries=150, sequenceid=256, filesize=11.9 K 2024-11-17T01:26:46,771 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 733401ed1ccb71c159e3f227c30cedc7 in 563ms, sequenceid=256, compaction requested=false 2024-11-17T01:26:46,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:46,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:46,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-17T01:26:46,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-17T01:26:46,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-17T01:26:46,776 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 722 msec 2024-11-17T01:26:46,780 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 733 msec 2024-11-17T01:26:46,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:46,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-17T01:26:46,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:46,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:46,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:46,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:46,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:46,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:46,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/21b4b5530fac4d3e92a9d61b12366520 is 50, key is test_row_0/A:col10/1731806806965/Put/seqid=0 2024-11-17T01:26:46,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741896_1072 (size=14741) 2024-11-17T01:26:46,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:46,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806866991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:47,097 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:47,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806867095, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:47,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-17T01:26:47,159 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-17T01:26:47,160 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:26:47,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-17T01:26:47,163 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:26:47,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-17T01:26:47,164 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:26:47,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:26:47,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-17T01:26:47,305 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:47,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 270 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806867304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:47,317 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:47,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-17T01:26:47,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:47,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:47,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:47,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
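The RegionTooBusyException entries above are HRegion.checkResources rejecting writes because the region's memstore has grown past its blocking limit (reported here as 512.0 K). In stock HBase that limit is the per-region flush size multiplied by the memstore block multiplier, so the small value suggests the test runs with a deliberately tiny flush size. Below is a minimal sketch of how such a limit could be configured for a test cluster; the concrete numbers are illustrative assumptions chosen to work out to a 512 K limit, not values read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
      // Build a configuration whose memstore blocking limit works out to 512 KB,
      // the limit reported in the RegionTooBusyException messages above.
      public static Configuration testConf() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches 128 KB (illustrative test-sized
        // value; the production default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block writes once the memstore reaches flush.size * multiplier = 512 KB.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }

Once the in-flight flush completes and the memstore drains below the limit, the same writers succeed again, which is why these exceptions arrive in bursts between flushes.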
2024-11-17T01:26:47,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:47,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:47,389 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/21b4b5530fac4d3e92a9d61b12366520 2024-11-17T01:26:47,411 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/02ec49fa90de4d548f9a8bddd04ee38e is 50, key is test_row_0/B:col10/1731806806965/Put/seqid=0 2024-11-17T01:26:47,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741897_1073 (size=12301) 2024-11-17T01:26:47,430 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/02ec49fa90de4d548f9a8bddd04ee38e 2024-11-17T01:26:47,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/b61e4f4dd38c4f2f89fa299b2ce87c26 is 50, key is test_row_0/C:col10/1731806806965/Put/seqid=0 2024-11-17T01:26:47,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-17T01:26:47,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741898_1074 (size=12301) 2024-11-17T01:26:47,471 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:47,471 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-17T01:26:47,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:47,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:47,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:47,472 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:47,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:47,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:47,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:47,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806867609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:47,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:47,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-17T01:26:47,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:47,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:47,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:47,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
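On the client side these rejections are retriable: a later Thread-149 entry shows the writer going through HBase's own RpcRetryingCallerImpl (tries=7, retries=16) from AcidGuaranteesTestTool$AtomicityWriter via HTable.put. A hand-rolled equivalent is sketched below using the public client API; the table, row, and column names mirror the log, but the retry loop itself is an illustration, not the test tool's code, and with default settings table.put already retries internally (see hbase.client.retries.number), so the explicit catch mainly matters when those built-in retries are turned down.

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
      public static void putWithBackoff(Connection conn) throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 0; attempt < 10; attempt++) {
            try {
              table.put(put);            // rejected while the memstore is over its blocking limit
              return;
            } catch (RegionTooBusyException | RetriesExhaustedException busy) {
              Thread.sleep(backoffMs);   // give the in-flight flush time to drain the memstore
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
          throw new IOException("region still too busy after retries");
        }
      }
    }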
2024-11-17T01:26:47,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:47,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:47,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-17T01:26:47,779 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:47,780 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-17T01:26:47,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:47,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:47,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:47,781 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:47,781 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:47,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
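The repeated pid=19 failures above are the master re-dispatching a FlushRegionProcedure that the region server declines ("NOT flushing ... as already flushing") while the MemStoreFlusher is still working; the subprocedure simply keeps being retried until that flush finishes. The flush itself was requested through the admin API (the earlier "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry). A minimal sketch of that request with the public client API follows; it shows only the entry point, whereas the test goes through HBaseAdmin and a TableFuture as logged.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class FlushRequest {
      // Ask the master to flush the table; this surfaces as a FlushTableProcedure
      // with one FlushRegionProcedure child per region, as seen for pid=18/pid=19 above.
      public static void flushTable(Connection conn) throws IOException {
        try (Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }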
2024-11-17T01:26:47,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=286 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/b61e4f4dd38c4f2f89fa299b2ce87c26 2024-11-17T01:26:47,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/21b4b5530fac4d3e92a9d61b12366520 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/21b4b5530fac4d3e92a9d61b12366520 2024-11-17T01:26:47,889 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/21b4b5530fac4d3e92a9d61b12366520, entries=200, sequenceid=286, filesize=14.4 K 2024-11-17T01:26:47,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/02ec49fa90de4d548f9a8bddd04ee38e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/02ec49fa90de4d548f9a8bddd04ee38e 2024-11-17T01:26:47,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/02ec49fa90de4d548f9a8bddd04ee38e, entries=150, sequenceid=286, filesize=12.0 K 2024-11-17T01:26:47,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/b61e4f4dd38c4f2f89fa299b2ce87c26 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/b61e4f4dd38c4f2f89fa299b2ce87c26 2024-11-17T01:26:47,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/b61e4f4dd38c4f2f89fa299b2ce87c26, entries=150, sequenceid=286, filesize=12.0 K 2024-11-17T01:26:47,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 733401ed1ccb71c159e3f227c30cedc7 in 941ms, sequenceid=286, compaction requested=true 2024-11-17T01:26:47,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:47,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:47,909 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-17T01:26:47,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:47,910 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:47,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:47,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:47,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:47,911 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:47,911 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:47,911 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:47,911 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:47,911 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e5d557bbe9af40c6be6c48706f299e67, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/fc4010d7b06f487a8dcf8127cad93ecb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/02ec49fa90de4d548f9a8bddd04ee38e] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.3 K 2024-11-17T01:26:47,912 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39623 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:47,912 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e5d557bbe9af40c6be6c48706f299e67, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731806805422 2024-11-17T01:26:47,912 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:47,912 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
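The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries come from ExploringCompactionPolicy, which walks contiguous windows of eligible store files and keeps a window only if every file in it is no larger than the configured ratio times the combined size of the other files in that window. The snippet below is a deliberately simplified illustration of that ratio test alone; the real policy also enforces minimum and maximum file counts, total-size limits, and a separate off-peak ratio, and the class and method names here are made up for the example.

    import java.util.List;

    public class RatioCheck {
      // Simplified "in ratio" test: every file must be <= ratio * (sum of the other files).
      static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly the three B-family HFiles from the log (~12.4 K, 11.9 K, 12.0 K): well in ratio.
        System.out.println(filesInRatio(List.of(12_700L, 12_200L, 12_300L), 1.2)); // prints true
      }
    }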
2024-11-17T01:26:47,912 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/1c5318a31bea418fa9e0deb7cbd6618a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f5bdfcd40fa42fdb21fe863eee83781, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/21b4b5530fac4d3e92a9d61b12366520] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=38.7 K 2024-11-17T01:26:47,912 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting fc4010d7b06f487a8dcf8127cad93ecb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1731806805569 2024-11-17T01:26:47,912 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 02ec49fa90de4d548f9a8bddd04ee38e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731806806340 2024-11-17T01:26:47,912 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c5318a31bea418fa9e0deb7cbd6618a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731806805422 2024-11-17T01:26:47,913 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f5bdfcd40fa42fdb21fe863eee83781, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1731806805569 2024-11-17T01:26:47,914 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 21b4b5530fac4d3e92a9d61b12366520, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731806806340 2024-11-17T01:26:47,928 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#60 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:47,929 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/b990dc36d93d436cb58eed7a5acfdf80 is 50, key is test_row_0/B:col10/1731806806965/Put/seqid=0 2024-11-17T01:26:47,932 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#61 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:47,933 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:47,933 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/5a1ad0cebbd7422eb06620e9a154a55d is 50, key is test_row_0/A:col10/1731806806965/Put/seqid=0 2024-11-17T01:26:47,934 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-17T01:26:47,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:47,934 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-17T01:26:47,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:47,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:47,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:47,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:47,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:47,935 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:47,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741900_1076 (size=12983) 2024-11-17T01:26:47,957 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/5a1ad0cebbd7422eb06620e9a154a55d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/5a1ad0cebbd7422eb06620e9a154a55d 2024-11-17T01:26:47,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/af6e485ddece409eb54064b9ca96e92f is 50, key is test_row_0/A:col10/1731806806969/Put/seqid=0 2024-11-17T01:26:47,968 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 5a1ad0cebbd7422eb06620e9a154a55d(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:47,968 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:47,968 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806807909; duration=0sec 2024-11-17T01:26:47,968 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:47,968 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:47,968 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:47,970 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:47,970 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:47,970 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:47,970 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d928c411332b43489c56e82efbc3bab0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/eb77af869e1b43fb8e2cf719662c26b6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/b61e4f4dd38c4f2f89fa299b2ce87c26] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.3 K 2024-11-17T01:26:47,971 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting d928c411332b43489c56e82efbc3bab0, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=245, earliestPutTs=1731806805422 2024-11-17T01:26:47,972 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb77af869e1b43fb8e2cf719662c26b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=256, earliestPutTs=1731806805569 2024-11-17T01:26:47,972 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting b61e4f4dd38c4f2f89fa299b2ce87c26, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731806806340 2024-11-17T01:26:47,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741899_1075 (size=12983) 2024-11-17T01:26:47,981 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/b990dc36d93d436cb58eed7a5acfdf80 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/b990dc36d93d436cb58eed7a5acfdf80 2024-11-17T01:26:47,993 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into b990dc36d93d436cb58eed7a5acfdf80(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
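As a quick check on the numbers, the three B-family inputs listed earlier (12.4 K + 11.9 K + 12.0 K) account for the reported totalSize=36.3 K, yet the compacted output b990dc36d93d436cb58eed7a5acfdf80 is only 12.7 K. That is consistent with the three flushes carrying successive versions of the same ~150 rows, so the rewrite keeps far fewer cells than it reads; the log does not show the table schema, so the exact version-retention settings behind this reduction are an assumption here.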
2024-11-17T01:26:47,994 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:47,994 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806807909; duration=0sec 2024-11-17T01:26:47,994 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:47,994 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:47,994 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#63 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:47,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741901_1077 (size=9857) 2024-11-17T01:26:47,995 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a422b453de854eb7856a2c34e203c199 is 50, key is test_row_0/C:col10/1731806806965/Put/seqid=0 2024-11-17T01:26:48,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741902_1078 (size=12983) 2024-11-17T01:26:48,116 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:48,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:48,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:48,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 298 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806868177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:48,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:48,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53600 deadline: 1731806868213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:48,216 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8210 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:48,231 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:48,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53582 deadline: 1731806868229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:48,232 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8231 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:48,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:48,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806868241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:48,243 DEBUG [Thread-155 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8246 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:48,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:48,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806868243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:48,248 DEBUG [Thread-157 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8247 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:48,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-17T01:26:48,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:48,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 300 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806868281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:48,396 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/af6e485ddece409eb54064b9ca96e92f 2024-11-17T01:26:48,418 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a422b453de854eb7856a2c34e203c199 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a422b453de854eb7856a2c34e203c199 2024-11-17T01:26:48,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/f7c559e690d24224ad4a9744c2a3112b is 50, key is test_row_0/B:col10/1731806806969/Put/seqid=0 2024-11-17T01:26:48,434 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into a422b453de854eb7856a2c34e203c199(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:48,434 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:48,434 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806807911; duration=0sec 2024-11-17T01:26:48,435 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:48,435 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:48,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741903_1079 (size=9857) 2024-11-17T01:26:48,450 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/f7c559e690d24224ad4a9744c2a3112b 2024-11-17T01:26:48,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a5804dc3bd7241fc86da36c591dfc081 is 50, key is test_row_0/C:col10/1731806806969/Put/seqid=0 2024-11-17T01:26:48,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741904_1080 (size=9857) 2024-11-17T01:26:48,487 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:48,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806868485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:48,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:48,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806868788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:48,885 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a5804dc3bd7241fc86da36c591dfc081 2024-11-17T01:26:48,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/af6e485ddece409eb54064b9ca96e92f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/af6e485ddece409eb54064b9ca96e92f 2024-11-17T01:26:48,900 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/af6e485ddece409eb54064b9ca96e92f, entries=100, sequenceid=295, filesize=9.6 K 2024-11-17T01:26:48,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/f7c559e690d24224ad4a9744c2a3112b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/f7c559e690d24224ad4a9744c2a3112b 2024-11-17T01:26:48,909 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/f7c559e690d24224ad4a9744c2a3112b, entries=100, sequenceid=295, filesize=9.6 K 2024-11-17T01:26:48,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a5804dc3bd7241fc86da36c591dfc081 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a5804dc3bd7241fc86da36c591dfc081 2024-11-17T01:26:48,921 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a5804dc3bd7241fc86da36c591dfc081, entries=100, sequenceid=295, filesize=9.6 K 2024-11-17T01:26:48,922 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 733401ed1ccb71c159e3f227c30cedc7 in 988ms, sequenceid=295, compaction requested=false 2024-11-17T01:26:48,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:48,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:48,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-17T01:26:48,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-17T01:26:48,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-17T01:26:48,927 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7600 sec 2024-11-17T01:26:48,931 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.7680 sec 2024-11-17T01:26:49,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-17T01:26:49,268 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-17T01:26:49,269 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:26:49,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-17T01:26:49,272 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:26:49,272 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-17T01:26:49,273 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:26:49,273 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:26:49,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:49,293 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-17T01:26:49,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:49,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:49,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:49,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:49,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:49,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:49,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/7cf8be94018f4526885476ed92fb582c is 50, key is test_row_0/A:col10/1731806808162/Put/seqid=0 2024-11-17T01:26:49,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741905_1081 (size=14741) 2024-11-17T01:26:49,307 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/7cf8be94018f4526885476ed92fb582c 2024-11-17T01:26:49,311 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:49,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 312 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806869311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:49,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/38739dd541ac4a02a3aaf750d255f226 is 50, key is test_row_0/B:col10/1731806808162/Put/seqid=0 2024-11-17T01:26:49,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741906_1082 (size=12301) 2024-11-17T01:26:49,339 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/38739dd541ac4a02a3aaf750d255f226 2024-11-17T01:26:49,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/95159b875dd844e8887d41c2d773774b is 50, key is test_row_0/C:col10/1731806808162/Put/seqid=0 2024-11-17T01:26:49,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741907_1083 (size=12301) 2024-11-17T01:26:49,363 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/95159b875dd844e8887d41c2d773774b 2024-11-17T01:26:49,368 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/7cf8be94018f4526885476ed92fb582c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7cf8be94018f4526885476ed92fb582c 2024-11-17T01:26:49,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=20 2024-11-17T01:26:49,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7cf8be94018f4526885476ed92fb582c, entries=200, sequenceid=326, filesize=14.4 K 2024-11-17T01:26:49,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/38739dd541ac4a02a3aaf750d255f226 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/38739dd541ac4a02a3aaf750d255f226 2024-11-17T01:26:49,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/38739dd541ac4a02a3aaf750d255f226, entries=150, sequenceid=326, filesize=12.0 K 2024-11-17T01:26:49,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/95159b875dd844e8887d41c2d773774b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/95159b875dd844e8887d41c2d773774b 2024-11-17T01:26:49,392 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/95159b875dd844e8887d41c2d773774b, entries=150, sequenceid=326, filesize=12.0 K 2024-11-17T01:26:49,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 733401ed1ccb71c159e3f227c30cedc7 in 100ms, sequenceid=326, compaction requested=true 2024-11-17T01:26:49,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:49,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:49,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:49,393 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:49,393 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:49,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:49,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:49,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:49,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:49,395 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37581 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:49,395 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:49,395 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:49,395 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:49,395 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:49,395 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/5a1ad0cebbd7422eb06620e9a154a55d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/af6e485ddece409eb54064b9ca96e92f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7cf8be94018f4526885476ed92fb582c] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.7 K 2024-11-17T01:26:49,396 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:49,396 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/b990dc36d93d436cb58eed7a5acfdf80, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/f7c559e690d24224ad4a9744c2a3112b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/38739dd541ac4a02a3aaf750d255f226] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=34.3 K 2024-11-17T01:26:49,396 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a1ad0cebbd7422eb06620e9a154a55d, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731806806340 2024-11-17T01:26:49,396 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting b990dc36d93d436cb58eed7a5acfdf80, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731806806340 2024-11-17T01:26:49,397 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f7c559e690d24224ad4a9744c2a3112b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1731806806969 2024-11-17T01:26:49,397 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting af6e485ddece409eb54064b9ca96e92f, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1731806806969 2024-11-17T01:26:49,398 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7cf8be94018f4526885476ed92fb582c, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731806808162 2024-11-17T01:26:49,398 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 38739dd541ac4a02a3aaf750d255f226, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731806808162 2024-11-17T01:26:49,427 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:49,428 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-17T01:26:49,428 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#69 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:49,428 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#70 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:49,429 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/b980786db294440a91660afde9d8d87c is 50, key is test_row_0/A:col10/1731806808162/Put/seqid=0 2024-11-17T01:26:49,429 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/784265df201c45e8addc1c4d625da8fd is 50, key is test_row_0/B:col10/1731806808162/Put/seqid=0 2024-11-17T01:26:49,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:49,429 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-17T01:26:49,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:49,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:49,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:49,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:49,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:49,430 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:49,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
as already flushing 2024-11-17T01:26:49,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:49,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/bc50fed2d4954c23913f3d60d8c20bb8 is 50, key is test_row_0/A:col10/1731806809303/Put/seqid=0 2024-11-17T01:26:49,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741908_1084 (size=13085) 2024-11-17T01:26:49,461 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/b980786db294440a91660afde9d8d87c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b980786db294440a91660afde9d8d87c 2024-11-17T01:26:49,469 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into b980786db294440a91660afde9d8d87c(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:49,469 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:49,469 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806809393; duration=0sec 2024-11-17T01:26:49,470 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:49,470 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:49,470 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:49,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741909_1085 (size=13085) 2024-11-17T01:26:49,473 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:49,473 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:49,473 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in 
TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:49,473 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a422b453de854eb7856a2c34e203c199, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a5804dc3bd7241fc86da36c591dfc081, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/95159b875dd844e8887d41c2d773774b] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=34.3 K 2024-11-17T01:26:49,474 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting a422b453de854eb7856a2c34e203c199, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=286, earliestPutTs=1731806806340 2024-11-17T01:26:49,475 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting a5804dc3bd7241fc86da36c591dfc081, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1731806806969 2024-11-17T01:26:49,475 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95159b875dd844e8887d41c2d773774b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731806808162 2024-11-17T01:26:49,479 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/784265df201c45e8addc1c4d625da8fd as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/784265df201c45e8addc1c4d625da8fd 2024-11-17T01:26:49,488 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into 784265df201c45e8addc1c4d625da8fd(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:49,488 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:49,488 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806809393; duration=0sec 2024-11-17T01:26:49,488 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:49,488 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:49,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741910_1086 (size=14741) 2024-11-17T01:26:49,491 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/bc50fed2d4954c23913f3d60d8c20bb8 2024-11-17T01:26:49,491 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#72 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:49,493 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/6f91d56450ac43fd8b8394ff45a54ac8 is 50, key is test_row_0/C:col10/1731806808162/Put/seqid=0 2024-11-17T01:26:49,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/9931f555f2f74de8ab25f5e79146e0d8 is 50, key is test_row_0/B:col10/1731806809303/Put/seqid=0 2024-11-17T01:26:49,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741911_1087 (size=13085) 2024-11-17T01:26:49,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741912_1088 (size=12301) 2024-11-17T01:26:49,512 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/9931f555f2f74de8ab25f5e79146e0d8 2024-11-17T01:26:49,518 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/6f91d56450ac43fd8b8394ff45a54ac8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/6f91d56450ac43fd8b8394ff45a54ac8 2024-11-17T01:26:49,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/1bd772ff45964fb8b9ee3ecb9eea55c9 is 50, key is test_row_0/C:col10/1731806809303/Put/seqid=0 2024-11-17T01:26:49,529 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into 6f91d56450ac43fd8b8394ff45a54ac8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:49,529 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:49,529 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806809394; duration=0sec 2024-11-17T01:26:49,529 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:49,529 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:49,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741913_1089 (size=12301) 2024-11-17T01:26:49,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:49,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 339 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806869534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:49,538 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/1bd772ff45964fb8b9ee3ecb9eea55c9 2024-11-17T01:26:49,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/bc50fed2d4954c23913f3d60d8c20bb8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc50fed2d4954c23913f3d60d8c20bb8 2024-11-17T01:26:49,552 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc50fed2d4954c23913f3d60d8c20bb8, entries=200, sequenceid=335, filesize=14.4 K 2024-11-17T01:26:49,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/9931f555f2f74de8ab25f5e79146e0d8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9931f555f2f74de8ab25f5e79146e0d8 2024-11-17T01:26:49,560 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9931f555f2f74de8ab25f5e79146e0d8, entries=150, sequenceid=335, filesize=12.0 K 2024-11-17T01:26:49,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/1bd772ff45964fb8b9ee3ecb9eea55c9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1bd772ff45964fb8b9ee3ecb9eea55c9 2024-11-17T01:26:49,570 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1bd772ff45964fb8b9ee3ecb9eea55c9, entries=150, sequenceid=335, filesize=12.0 K 2024-11-17T01:26:49,571 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 733401ed1ccb71c159e3f227c30cedc7 in 142ms, sequenceid=335, compaction requested=false 2024-11-17T01:26:49,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:49,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:49,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-17T01:26:49,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-17T01:26:49,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-17T01:26:49,576 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-17T01:26:49,576 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 300 msec 2024-11-17T01:26:49,578 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 308 msec 2024-11-17T01:26:49,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:49,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-17T01:26:49,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:49,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:49,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:49,640 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-17T01:26:49,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:49,641 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:49,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/ca932dc980294ac3ac5a02ec9374e439 is 50, key is test_row_0/A:col10/1731806809638/Put/seqid=0 2024-11-17T01:26:49,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741914_1090 (size=14741) 2024-11-17T01:26:49,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:49,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 347 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806869655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:49,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:49,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 349 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806869758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:49,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-17T01:26:49,875 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-17T01:26:49,876 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:26:49,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-17T01:26:49,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-17T01:26:49,878 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:26:49,879 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:26:49,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:26:49,966 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:49,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 351 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806869964, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:49,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-17T01:26:50,030 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:50,031 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-17T01:26:50,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:50,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:50,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:50,031 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:50,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:50,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:50,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/ca932dc980294ac3ac5a02ec9374e439 2024-11-17T01:26:50,063 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/902ecf25df7e4c20a81c081264877d5c is 50, key is test_row_0/B:col10/1731806809638/Put/seqid=0 2024-11-17T01:26:50,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741915_1091 (size=12301) 2024-11-17T01:26:50,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/902ecf25df7e4c20a81c081264877d5c 2024-11-17T01:26:50,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/155ce6ca7ab44f438cefbaa5428bae86 is 50, key is test_row_0/C:col10/1731806809638/Put/seqid=0 2024-11-17T01:26:50,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741916_1092 (size=12301) 2024-11-17T01:26:50,102 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/155ce6ca7ab44f438cefbaa5428bae86 2024-11-17T01:26:50,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/ca932dc980294ac3ac5a02ec9374e439 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/ca932dc980294ac3ac5a02ec9374e439 2024-11-17T01:26:50,116 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/ca932dc980294ac3ac5a02ec9374e439, entries=200, sequenceid=366, filesize=14.4 K 2024-11-17T01:26:50,118 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/902ecf25df7e4c20a81c081264877d5c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/902ecf25df7e4c20a81c081264877d5c 2024-11-17T01:26:50,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/902ecf25df7e4c20a81c081264877d5c, entries=150, sequenceid=366, filesize=12.0 K 2024-11-17T01:26:50,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/155ce6ca7ab44f438cefbaa5428bae86 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/155ce6ca7ab44f438cefbaa5428bae86 2024-11-17T01:26:50,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/155ce6ca7ab44f438cefbaa5428bae86, entries=150, sequenceid=366, filesize=12.0 K 2024-11-17T01:26:50,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 733401ed1ccb71c159e3f227c30cedc7 in 498ms, sequenceid=366, compaction requested=true 2024-11-17T01:26:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:50,138 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:50,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:50,138 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:50,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:50,139 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:50,140 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:50,140 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:50,140 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42567 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:50,140 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:50,140 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:50,140 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:50,140 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/784265df201c45e8addc1c4d625da8fd, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9931f555f2f74de8ab25f5e79146e0d8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/902ecf25df7e4c20a81c081264877d5c] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.8 K 2024-11-17T01:26:50,140 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b980786db294440a91660afde9d8d87c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc50fed2d4954c23913f3d60d8c20bb8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/ca932dc980294ac3ac5a02ec9374e439] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=41.6 K 2024-11-17T01:26:50,139 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:50,141 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting b980786db294440a91660afde9d8d87c, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731806808162 2024-11-17T01:26:50,141 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 784265df201c45e8addc1c4d625da8fd, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731806808162 2024-11-17T01:26:50,142 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc50fed2d4954c23913f3d60d8c20bb8, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1731806809296 2024-11-17T01:26:50,142 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 9931f555f2f74de8ab25f5e79146e0d8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1731806809296 2024-11-17T01:26:50,142 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ca932dc980294ac3ac5a02ec9374e439, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1731806809471 2024-11-17T01:26:50,142 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 902ecf25df7e4c20a81c081264877d5c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1731806809471 2024-11-17T01:26:50,160 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#78 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:50,161 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/80de5c968863443bbe74e3afba402841 is 50, key is test_row_0/A:col10/1731806809638/Put/seqid=0 2024-11-17T01:26:50,167 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:50,168 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/e88b8d77a452434985a5a1d0b1a5480d is 50, key is test_row_0/B:col10/1731806809638/Put/seqid=0 2024-11-17T01:26:50,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741917_1093 (size=13187) 2024-11-17T01:26:50,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741918_1094 (size=13187) 2024-11-17T01:26:50,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-17T01:26:50,184 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:50,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-17T01:26:50,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:50,185 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-17T01:26:50,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:50,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:50,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:50,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:50,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:50,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:50,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/c22e7226e8e24bc8956105e2e1012bde is 50, key is test_row_0/A:col10/1731806809653/Put/seqid=0 
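The PressureAwareThroughputController entries above report each compaction's average throughput against a 50.00 MB/second total limit and count how often the writer had to sleep. Below is a minimal, illustrative rate limiter in that spirit; it is not HBase's controller, the class and method names are hypothetical, and only the 50 MB/s figure is taken from the log.

    // Illustrative sketch only: a simple write-throughput limiter in the spirit of the
    // PressureAwareThroughputController lines logged above. NOT the HBase implementation.
    public final class SimpleThroughputLimiter {
        private final double maxBytesPerSecond;   // e.g. 50 MB/s, "total limit is 50.00 MB/second"
        private final long windowStartNanos = System.nanoTime();
        private long bytesInWindow = 0;

        public SimpleThroughputLimiter(double maxBytesPerSecond) {
            this.maxBytesPerSecond = maxBytesPerSecond;
        }

        /** Call after writing 'bytes'; sleeps if the running rate exceeds the limit. */
        public synchronized void control(long bytes) throws InterruptedException {
            bytesInWindow += bytes;
            double elapsedSec = (System.nanoTime() - windowStartNanos) / 1e9;
            double minSecondsNeeded = bytesInWindow / maxBytesPerSecond;
            if (minSecondsNeeded > elapsedSec) {
                long sleepMs = (long) ((minSecondsNeeded - elapsedSec) * 1000);
                Thread.sleep(sleepMs);   // corresponds to the "slept N time(s)" counter in the log
            }
        }
    }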
2024-11-17T01:26:50,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741919_1095 (size=12301) 2024-11-17T01:26:50,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:50,274 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:50,333 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:50,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 378 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806870332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:50,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:50,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 380 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806870435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:50,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-17T01:26:50,575 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/80de5c968863443bbe74e3afba402841 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/80de5c968863443bbe74e3afba402841 2024-11-17T01:26:50,598 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/c22e7226e8e24bc8956105e2e1012bde 2024-11-17T01:26:50,627 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 80de5c968863443bbe74e3afba402841(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
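The HRegionFileSystem(442) "Committing .tmp/... as ..." entries above show flush and compaction output first written under the region's .tmp directory and then moved into the column-family directory once complete. A minimal sketch of that write-then-rename commit step using the Hadoop FileSystem API follows; the helper class is hypothetical, not HBase's HRegionFileSystem, and the paths are abbreviated.

    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.io.IOException;

    // Sketch of the write-to-.tmp-then-rename pattern visible in the
    // "Committing .tmp/... as ..." log lines. Hypothetical helper, not HBase code.
    public final class TmpCommit {
        /** Move a finished store file from the region's .tmp dir into the family dir. */
        public static Path commit(FileSystem fs, Path tmpFile, Path familyDir) throws IOException {
            Path dst = new Path(familyDir, tmpFile.getName());
            // rename is atomic within a single HDFS namespace, so readers never observe
            // a partially written store file under the column-family directory
            if (!fs.rename(tmpFile, dst)) {
                throw new IOException("Failed to commit " + tmpFile + " as " + dst);
            }
            return dst;
        }
    }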
2024-11-17T01:26:50,627 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:50,627 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806810138; duration=0sec 2024-11-17T01:26:50,627 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:50,627 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:50,627 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:50,629 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:50,629 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:50,629 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:50,629 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/6f91d56450ac43fd8b8394ff45a54ac8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1bd772ff45964fb8b9ee3ecb9eea55c9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/155ce6ca7ab44f438cefbaa5428bae86] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.8 K 2024-11-17T01:26:50,630 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f91d56450ac43fd8b8394ff45a54ac8, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731806808162 2024-11-17T01:26:50,630 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/e88b8d77a452434985a5a1d0b1a5480d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e88b8d77a452434985a5a1d0b1a5480d 2024-11-17T01:26:50,631 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bd772ff45964fb8b9ee3ecb9eea55c9, 
keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=335, earliestPutTs=1731806809296 2024-11-17T01:26:50,631 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 155ce6ca7ab44f438cefbaa5428bae86, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1731806809471 2024-11-17T01:26:50,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/1e6b15d13cc84f21a708de601506125c is 50, key is test_row_0/B:col10/1731806809653/Put/seqid=0 2024-11-17T01:26:50,640 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into e88b8d77a452434985a5a1d0b1a5480d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:50,640 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:50,640 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806810138; duration=0sec 2024-11-17T01:26:50,640 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:50,640 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:50,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741920_1096 (size=12301) 2024-11-17T01:26:50,642 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#82 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:50,643 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/2271384f3fbb417e82ec98e6d8291ac0 is 50, key is test_row_0/C:col10/1731806809638/Put/seqid=0 2024-11-17T01:26:50,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:50,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 382 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806870640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:50,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741921_1097 (size=13187) 2024-11-17T01:26:50,666 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/2271384f3fbb417e82ec98e6d8291ac0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2271384f3fbb417e82ec98e6d8291ac0 2024-11-17T01:26:50,673 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into 2271384f3fbb417e82ec98e6d8291ac0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:50,674 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:50,674 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806810139; duration=0sec 2024-11-17T01:26:50,674 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:50,674 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:50,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:50,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 384 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806870946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:50,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-17T01:26:51,041 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/1e6b15d13cc84f21a708de601506125c 2024-11-17T01:26:51,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/e5f8cae544cf4ac884118874b069e28f is 50, key is test_row_0/C:col10/1731806809653/Put/seqid=0 2024-11-17T01:26:51,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741922_1098 (size=12301) 2024-11-17T01:26:51,063 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/e5f8cae544cf4ac884118874b069e28f 2024-11-17T01:26:51,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/c22e7226e8e24bc8956105e2e1012bde as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/c22e7226e8e24bc8956105e2e1012bde 2024-11-17T01:26:51,077 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/c22e7226e8e24bc8956105e2e1012bde, entries=150, sequenceid=374, filesize=12.0 K 2024-11-17T01:26:51,079 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/1e6b15d13cc84f21a708de601506125c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/1e6b15d13cc84f21a708de601506125c 2024-11-17T01:26:51,086 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/1e6b15d13cc84f21a708de601506125c, entries=150, sequenceid=374, filesize=12.0 K 2024-11-17T01:26:51,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/e5f8cae544cf4ac884118874b069e28f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e5f8cae544cf4ac884118874b069e28f 2024-11-17T01:26:51,095 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e5f8cae544cf4ac884118874b069e28f, entries=150, sequenceid=374, filesize=12.0 K 2024-11-17T01:26:51,096 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=167.72 KB/171750 for 733401ed1ccb71c159e3f227c30cedc7 in 912ms, sequenceid=374, compaction requested=false 2024-11-17T01:26:51,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:51,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
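The Mutate calls rejected above with RegionTooBusyException ("Over memstore limit=512.0 K") are the server telling writers to back off until the in-flight flush completes. The HBase client normally retries these internally, but the sketch below shows an equivalent manual retry with exponential backoff against the TestAcidGuarantees table; the row, family, value and backoff numbers are illustrative assumptions, and in a real client the exception may surface wrapped by the retry machinery rather than directly.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    // Hypothetical client-side retry loop for writes rejected while the region is
    // over its memstore blocking limit, as in the warnings logged above.
    public final class BusyRegionRetry {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;                     // illustrative starting backoff
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);
                        return;                           // write accepted
                    } catch (RegionTooBusyException e) {  // region over its memstore blocking limit
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;                   // back off before retrying
                    }
                }
                throw new IllegalStateException("region still too busy after retries");
            }
        }
    }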
2024-11-17T01:26:51,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-17T01:26:51,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-17T01:26:51,100 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-17T01:26:51,100 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2190 sec 2024-11-17T01:26:51,102 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 1.2250 sec 2024-11-17T01:26:51,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:51,452 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-17T01:26:51,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:51,452 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:51,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:51,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:51,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:51,453 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:51,459 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/4b6eff32b4c94167b17f34c0ec347940 is 50, key is test_row_0/A:col10/1731806810322/Put/seqid=0 2024-11-17T01:26:51,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:51,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 391 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806871473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:51,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741923_1099 (size=14741) 2024-11-17T01:26:51,476 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/4b6eff32b4c94167b17f34c0ec347940 2024-11-17T01:26:51,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/693b82f9040c40e08b25d23bde122c08 is 50, key is test_row_0/B:col10/1731806810322/Put/seqid=0 2024-11-17T01:26:51,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741924_1100 (size=12301) 2024-11-17T01:26:51,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/693b82f9040c40e08b25d23bde122c08 2024-11-17T01:26:51,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/5da5d75de38046e1b9c4e02d90484067 is 50, key is test_row_0/C:col10/1731806810322/Put/seqid=0 2024-11-17T01:26:51,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741925_1101 (size=12301) 2024-11-17T01:26:51,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:51,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 393 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806871577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:51,782 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:51,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 395 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806871781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:51,917 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=406 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/5da5d75de38046e1b9c4e02d90484067 2024-11-17T01:26:51,922 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/4b6eff32b4c94167b17f34c0ec347940 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4b6eff32b4c94167b17f34c0ec347940 2024-11-17T01:26:51,928 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4b6eff32b4c94167b17f34c0ec347940, entries=200, sequenceid=406, filesize=14.4 K 2024-11-17T01:26:51,929 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/693b82f9040c40e08b25d23bde122c08 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/693b82f9040c40e08b25d23bde122c08 2024-11-17T01:26:51,934 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/693b82f9040c40e08b25d23bde122c08, entries=150, sequenceid=406, filesize=12.0 K 2024-11-17T01:26:51,935 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/5da5d75de38046e1b9c4e02d90484067 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/5da5d75de38046e1b9c4e02d90484067 2024-11-17T01:26:51,940 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/5da5d75de38046e1b9c4e02d90484067, entries=150, sequenceid=406, filesize=12.0 K 2024-11-17T01:26:51,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 733401ed1ccb71c159e3f227c30cedc7 in 489ms, sequenceid=406, compaction requested=true 2024-11-17T01:26:51,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:51,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:51,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:51,941 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:51,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:51,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:51,941 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:51,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:51,941 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:51,943 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40229 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:51,943 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:51,943 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:51,943 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:51,943 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
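The flush just finished (~174.43 KB written at sequenceid=406) while concurrent writers were being rejected with "Over memstore limit=512.0 K". That limit is the blocking check a region applies before accepting a mutation: once the memstore grows past a configured flush size times a blocking multiplier, puts fail fast until a flush shrinks it again. The sketch below mirrors that check; the class is hypothetical, and the assumption that the 512 K here equals flush size times block multiplier (as with hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier) is ours, not stated in the log.

    import java.util.concurrent.atomic.AtomicLong;

    // Illustrative check in the spirit of HRegion.checkResources(); hypothetical class,
    // not the HBase implementation.
    public final class MemStoreGate {
        private final long blockingLimitBytes;                 // e.g. 512 K in this test run
        private final AtomicLong memStoreSizeBytes = new AtomicLong();

        public MemStoreGate(long flushSizeBytes, int blockMultiplier) {
            // assumed relationship: blocking limit = flush size * block multiplier
            this.blockingLimitBytes = flushSizeBytes * blockMultiplier;
        }

        /** Called before applying a mutation; mirrors the RegionTooBusyException path above. */
        public void checkResources() {
            if (memStoreSizeBytes.get() > blockingLimitBytes) {
                throw new IllegalStateException(
                    "Over memstore limit=" + blockingLimitBytes + " bytes; flush in progress");
            }
        }

        public void add(long bytes)     { memStoreSizeBytes.addAndGet(bytes); }   // on write
        public void flushed(long bytes) { memStoreSizeBytes.addAndGet(-bytes); }  // on flush completion
    }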
2024-11-17T01:26:51,943 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:51,943 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/80de5c968863443bbe74e3afba402841, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/c22e7226e8e24bc8956105e2e1012bde, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4b6eff32b4c94167b17f34c0ec347940] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=39.3 K 2024-11-17T01:26:51,943 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e88b8d77a452434985a5a1d0b1a5480d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/1e6b15d13cc84f21a708de601506125c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/693b82f9040c40e08b25d23bde122c08] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.9 K 2024-11-17T01:26:51,943 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80de5c968863443bbe74e3afba402841, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1731806809471 2024-11-17T01:26:51,944 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e88b8d77a452434985a5a1d0b1a5480d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1731806809471 2024-11-17T01:26:51,944 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting c22e7226e8e24bc8956105e2e1012bde, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1731806809643 2024-11-17T01:26:51,944 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e6b15d13cc84f21a708de601506125c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1731806809643 2024-11-17T01:26:51,944 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b6eff32b4c94167b17f34c0ec347940, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1731806810322 2024-11-17T01:26:51,945 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 693b82f9040c40e08b25d23bde122c08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1731806810322 
2024-11-17T01:26:51,961 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#87 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:51,961 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/bc1e8b757c8d49dfad15efc0c62251bf is 50, key is test_row_0/A:col10/1731806810322/Put/seqid=0 2024-11-17T01:26:51,964 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:51,965 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4da2e44e940d4a818cc206e3ac65320b is 50, key is test_row_0/B:col10/1731806810322/Put/seqid=0 2024-11-17T01:26:51,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-17T01:26:51,982 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-17T01:26:51,984 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:26:51,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-17T01:26:51,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-17T01:26:51,986 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:26:51,987 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:26:51,987 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:26:51,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741927_1103 (size=13289) 2024-11-17T01:26:52,000 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4da2e44e940d4a818cc206e3ac65320b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4da2e44e940d4a818cc206e3ac65320b 2024-11-17T01:26:52,007 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into 4da2e44e940d4a818cc206e3ac65320b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:52,008 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:52,008 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806811941; duration=0sec 2024-11-17T01:26:52,008 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:52,008 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:52,008 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:52,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741926_1102 (size=13289) 2024-11-17T01:26:52,012 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:52,012 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:52,012 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:52,013 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2271384f3fbb417e82ec98e6d8291ac0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e5f8cae544cf4ac884118874b069e28f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/5da5d75de38046e1b9c4e02d90484067] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=36.9 K 2024-11-17T01:26:52,014 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 2271384f3fbb417e82ec98e6d8291ac0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1731806809471 2024-11-17T01:26:52,015 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e5f8cae544cf4ac884118874b069e28f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1731806809643 2024-11-17T01:26:52,037 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 5da5d75de38046e1b9c4e02d90484067, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1731806810322 2024-11-17T01:26:52,040 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/bc1e8b757c8d49dfad15efc0c62251bf as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc1e8b757c8d49dfad15efc0c62251bf 2024-11-17T01:26:52,048 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into bc1e8b757c8d49dfad15efc0c62251bf(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:52,048 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:52,048 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806811941; duration=0sec 2024-11-17T01:26:52,049 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:52,049 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:52,052 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#89 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:52,053 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/3854a5837a754d9cbc36a1cad8b0413b is 50, key is test_row_0/C:col10/1731806810322/Put/seqid=0 2024-11-17T01:26:52,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741928_1104 (size=13289) 2024-11-17T01:26:52,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-17T01:26:52,092 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/3854a5837a754d9cbc36a1cad8b0413b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3854a5837a754d9cbc36a1cad8b0413b 2024-11-17T01:26:52,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:52,099 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:26:52,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:52,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:52,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:52,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:52,099 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:52,100 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:52,101 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into 3854a5837a754d9cbc36a1cad8b0413b(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:52,101 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:52,101 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806811941; duration=0sec 2024-11-17T01:26:52,102 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:52,102 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:52,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/227d302e873048eca534a261d1fe8465 is 50, key is test_row_0/A:col10/1731806811471/Put/seqid=0 2024-11-17T01:26:52,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741929_1105 (size=14741) 2024-11-17T01:26:52,117 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/227d302e873048eca534a261d1fe8465 2024-11-17T01:26:52,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/962c132baf7f493993dbb3d6fd27e63b is 50, key is test_row_0/B:col10/1731806811471/Put/seqid=0 2024-11-17T01:26:52,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741930_1106 (size=12301) 2024-11-17T01:26:52,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/962c132baf7f493993dbb3d6fd27e63b 2024-11-17T01:26:52,140 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-17T01:26:52,140 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:52,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,141 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:52,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:52,144 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/9bc198ae23944cbf8bd2a84206027e41 is 50, key is test_row_0/C:col10/1731806811471/Put/seqid=0 2024-11-17T01:26:52,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741931_1107 (size=12301) 2024-11-17T01:26:52,155 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=419 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/9bc198ae23944cbf8bd2a84206027e41 2024-11-17T01:26:52,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/227d302e873048eca534a261d1fe8465 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/227d302e873048eca534a261d1fe8465 2024-11-17T01:26:52,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/227d302e873048eca534a261d1fe8465, entries=200, sequenceid=419, filesize=14.4 K 2024-11-17T01:26:52,173 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/962c132baf7f493993dbb3d6fd27e63b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/962c132baf7f493993dbb3d6fd27e63b 2024-11-17T01:26:52,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/962c132baf7f493993dbb3d6fd27e63b, entries=150, sequenceid=419, filesize=12.0 K 2024-11-17T01:26:52,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/9bc198ae23944cbf8bd2a84206027e41 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9bc198ae23944cbf8bd2a84206027e41 2024-11-17T01:26:52,186 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9bc198ae23944cbf8bd2a84206027e41, entries=150, sequenceid=419, filesize=12.0 K 2024-11-17T01:26:52,188 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=134.18 KB/137400 for 733401ed1ccb71c159e3f227c30cedc7 in 89ms, sequenceid=419, 
compaction requested=false 2024-11-17T01:26:52,188 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:52,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:52,189 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-17T01:26:52,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:52,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:52,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:52,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:52,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:52,190 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:52,196 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/14430deec54242b298d4ef8e983b5df3 is 50, key is test_row_0/A:col10/1731806812188/Put/seqid=0 2024-11-17T01:26:52,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741932_1108 (size=14741) 2024-11-17T01:26:52,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/14430deec54242b298d4ef8e983b5df3 2024-11-17T01:26:52,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:52,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 431 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806872224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/469e877155704edc8deca95045652d9c is 50, key is test_row_0/B:col10/1731806812188/Put/seqid=0 2024-11-17T01:26:52,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741933_1109 (size=12301) 2024-11-17T01:26:52,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-17T01:26:52,296 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,296 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-17T01:26:52,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:52,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,297 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,328 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:52,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 433 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806872327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,449 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-17T01:26:52,449 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:52,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,450 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:52,450 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,530 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:52,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 435 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806872529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-17T01:26:52,602 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-17T01:26:52,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:52,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:52,603 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/469e877155704edc8deca95045652d9c 2024-11-17T01:26:52,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/9de495aac7eb491f84de7071b15db693 is 50, key is test_row_0/C:col10/1731806812188/Put/seqid=0 2024-11-17T01:26:52,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741934_1110 (size=12301) 2024-11-17T01:26:52,755 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,755 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-17T01:26:52,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
as already flushing 2024-11-17T01:26:52,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,756 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:52,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 437 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806872833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,909 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:52,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-17T01:26:52,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:52,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:52,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:52,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:52,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:53,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/9de495aac7eb491f84de7071b15db693 2024-11-17T01:26:53,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/14430deec54242b298d4ef8e983b5df3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/14430deec54242b298d4ef8e983b5df3 2024-11-17T01:26:53,063 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:53,064 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-17T01:26:53,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:53,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:53,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:53,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:53,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:53,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:53,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/14430deec54242b298d4ef8e983b5df3, entries=200, sequenceid=444, filesize=14.4 K 2024-11-17T01:26:53,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/469e877155704edc8deca95045652d9c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/469e877155704edc8deca95045652d9c 2024-11-17T01:26:53,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/469e877155704edc8deca95045652d9c, entries=150, sequenceid=444, filesize=12.0 K 2024-11-17T01:26:53,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/9de495aac7eb491f84de7071b15db693 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9de495aac7eb491f84de7071b15db693 2024-11-17T01:26:53,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9de495aac7eb491f84de7071b15db693, entries=150, sequenceid=444, filesize=12.0 K 2024-11-17T01:26:53,089 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 733401ed1ccb71c159e3f227c30cedc7 in 899ms, sequenceid=444, compaction requested=true 2024-11-17T01:26:53,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-17T01:26:53,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:53,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:53,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:53,089 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:53,089 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 
eligible, 16 blocking 2024-11-17T01:26:53,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:53,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:53,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:53,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:53,091 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42771 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:53,091 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:53,091 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:53,091 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:53,091 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:53,091 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:53,091 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4da2e44e940d4a818cc206e3ac65320b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/962c132baf7f493993dbb3d6fd27e63b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/469e877155704edc8deca95045652d9c] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=37.0 K 2024-11-17T01:26:53,091 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc1e8b757c8d49dfad15efc0c62251bf, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/227d302e873048eca534a261d1fe8465, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/14430deec54242b298d4ef8e983b5df3] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=41.8 K 2024-11-17T01:26:53,092 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4da2e44e940d4a818cc206e3ac65320b, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1731806810322 2024-11-17T01:26:53,092 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc1e8b757c8d49dfad15efc0c62251bf, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1731806810322 2024-11-17T01:26:53,092 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 962c132baf7f493993dbb3d6fd27e63b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1731806811471 2024-11-17T01:26:53,092 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 227d302e873048eca534a261d1fe8465, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1731806811471 2024-11-17T01:26:53,093 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 469e877155704edc8deca95045652d9c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1731806812179 2024-11-17T01:26:53,093 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14430deec54242b298d4ef8e983b5df3, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1731806812173 2024-11-17T01:26:53,105 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:53,106 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/015345933d034a488015837d033c4f40 is 50, key is test_row_0/A:col10/1731806812188/Put/seqid=0 2024-11-17T01:26:53,116 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#97 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:53,117 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/e296e700584344b3bf488448a7d26a7e is 50, key is test_row_0/B:col10/1731806812188/Put/seqid=0 2024-11-17T01:26:53,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741935_1111 (size=13391) 2024-11-17T01:26:53,129 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/015345933d034a488015837d033c4f40 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/015345933d034a488015837d033c4f40 2024-11-17T01:26:53,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741936_1112 (size=13391) 2024-11-17T01:26:53,139 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 015345933d034a488015837d033c4f40(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
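The compaction selection just logged ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy then picking all three files) is driven by the region server's store-file thresholds. A hedged sketch of the standard configuration keys behind those numbers follows; the values are illustrative defaults rather than what this test necessarily sets, and the throughput key in particular should be checked against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is selected
    // (the "3 eligible" seen in the SortedCompactionPolicy/ExploringCompactionPolicy lines).
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Upper bound on the number of files considered in a single compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Store-file count at which further flushes/writes are delayed
    // (the "16 blocking" in the selection message).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    // Compaction throughput cap enforced by PressureAwareThroughputController
    // ("total limit is 50.00 MB/second" above); key name assumed for the
    // pressure-aware controller, verify against your HBase version.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 50L * 1024 * 1024);
  }
}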
2024-11-17T01:26:53,139 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:53,139 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806813089; duration=0sec 2024-11-17T01:26:53,139 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:53,139 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:53,139 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:53,144 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37891 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:53,144 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:53,144 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:53,144 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3854a5837a754d9cbc36a1cad8b0413b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9bc198ae23944cbf8bd2a84206027e41, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9de495aac7eb491f84de7071b15db693] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=37.0 K 2024-11-17T01:26:53,145 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3854a5837a754d9cbc36a1cad8b0413b, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=406, earliestPutTs=1731806810322 2024-11-17T01:26:53,146 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bc198ae23944cbf8bd2a84206027e41, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=419, earliestPutTs=1731806811471 2024-11-17T01:26:53,146 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9de495aac7eb491f84de7071b15db693, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1731806812179 2024-11-17T01:26:53,148 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/e296e700584344b3bf488448a7d26a7e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e296e700584344b3bf488448a7d26a7e 2024-11-17T01:26:53,156 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into e296e700584344b3bf488448a7d26a7e(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:53,156 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:53,156 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806813089; duration=0sec 2024-11-17T01:26:53,156 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:53,156 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:53,159 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#98 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:53,160 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a18bbfbb048e42dc97d3e358f23863c0 is 50, key is test_row_0/C:col10/1731806812188/Put/seqid=0 2024-11-17T01:26:53,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741937_1113 (size=13391) 2024-11-17T01:26:53,177 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/a18bbfbb048e42dc97d3e358f23863c0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a18bbfbb048e42dc97d3e358f23863c0 2024-11-17T01:26:53,183 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into a18bbfbb048e42dc97d3e358f23863c0(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:53,184 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:53,184 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806813090; duration=0sec 2024-11-17T01:26:53,184 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:53,184 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:53,217 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:53,217 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-17T01:26:53,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:53,218 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-17T01:26:53,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:53,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:53,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:53,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:53,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:53,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:53,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/d63c5a30d6d5407085be0fe736963a1f is 50, key is test_row_0/A:col10/1731806812208/Put/seqid=0 2024-11-17T01:26:53,229 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741938_1114 (size=9857) 2024-11-17T01:26:53,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:53,342 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:53,383 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:53,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 460 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806873383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:53,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:53,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 462 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806873485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:53,630 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=459 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/d63c5a30d6d5407085be0fe736963a1f 2024-11-17T01:26:53,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/44f07afcc6ac4ad5ab2932879a81310b is 50, key is test_row_0/B:col10/1731806812208/Put/seqid=0 2024-11-17T01:26:53,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741939_1115 (size=9857) 2024-11-17T01:26:53,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:53,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 464 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806873688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:53,990 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:53,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 466 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806873990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:54,049 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=459 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/44f07afcc6ac4ad5ab2932879a81310b 2024-11-17T01:26:54,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/49b785e4f9774c59ae04aefee86fd1a7 is 50, key is test_row_0/C:col10/1731806812208/Put/seqid=0 2024-11-17T01:26:54,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741940_1116 (size=9857) 2024-11-17T01:26:54,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-17T01:26:54,468 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=459 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/49b785e4f9774c59ae04aefee86fd1a7 2024-11-17T01:26:54,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/d63c5a30d6d5407085be0fe736963a1f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d63c5a30d6d5407085be0fe736963a1f 2024-11-17T01:26:54,477 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d63c5a30d6d5407085be0fe736963a1f, entries=100, sequenceid=459, filesize=9.6 K 2024-11-17T01:26:54,479 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/44f07afcc6ac4ad5ab2932879a81310b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/44f07afcc6ac4ad5ab2932879a81310b 2024-11-17T01:26:54,485 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/44f07afcc6ac4ad5ab2932879a81310b, entries=100, sequenceid=459, filesize=9.6 K 2024-11-17T01:26:54,487 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/49b785e4f9774c59ae04aefee86fd1a7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/49b785e4f9774c59ae04aefee86fd1a7 2024-11-17T01:26:54,493 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/49b785e4f9774c59ae04aefee86fd1a7, entries=100, sequenceid=459, filesize=9.6 K 2024-11-17T01:26:54,494 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 733401ed1ccb71c159e3f227c30cedc7 in 1276ms, sequenceid=459, compaction requested=false 2024-11-17T01:26:54,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:54,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
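The "Over memstore limit=512.0 K" rejections seen throughout this stretch clear only once flushes like the one that just finished release memstore space; that blocking limit is normally the per-region flush size multiplied by a block multiplier. A hedged sketch of the two standard keys involved follows; the values shown are illustrative, and the exact settings this test uses are not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size at which a flush is requested (illustrative value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 KB
    // Multiplier applied to the flush size to get the blocking limit; writes are
    // rejected with RegionTooBusyException once the memstore exceeds
    // flush.size * multiplier (128 KB * 4 = 512 KB would match the
    // "Over memstore limit=512.0 K" seen here, though the test's actual values
    // are not shown in the log).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}

Raising either value trades memory pressure for fewer rejected writes; the pattern in this log, writers blocked until MemStoreFlusher and the flush procedure catch up, is the intended behaviour of that limit.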
2024-11-17T01:26:54,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:54,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-17T01:26:54,495 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-17T01:26:54,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:54,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-17T01:26:54,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:54,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:54,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:54,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:54,495 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:54,499 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-17T01:26:54,499 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5090 sec 2024-11-17T01:26:54,501 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/39d8d96c7a8341d4af240856c188a279 is 50, key is test_row_0/A:col10/1731806813354/Put/seqid=0 2024-11-17T01:26:54,501 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 2.5160 sec 2024-11-17T01:26:54,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741941_1117 (size=14741) 2024-11-17T01:26:54,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:54,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 477 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806874521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:54,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:54,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 479 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806874624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:54,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:54,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 481 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806874827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:54,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/39d8d96c7a8341d4af240856c188a279 2024-11-17T01:26:54,917 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/43a9564e989848079f37c75b3e2cd502 is 50, key is test_row_0/B:col10/1731806813354/Put/seqid=0 2024-11-17T01:26:54,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741942_1118 (size=12301) 2024-11-17T01:26:55,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:55,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 483 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806875130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:55,322 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/43a9564e989848079f37c75b3e2cd502 2024-11-17T01:26:55,330 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/d875c9f1ef0044e89baa860f6d684baa is 50, key is test_row_0/C:col10/1731806813354/Put/seqid=0 2024-11-17T01:26:55,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741943_1119 (size=12301) 2024-11-17T01:26:55,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:55,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 485 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806875632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:55,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=484 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/d875c9f1ef0044e89baa860f6d684baa 2024-11-17T01:26:55,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/39d8d96c7a8341d4af240856c188a279 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/39d8d96c7a8341d4af240856c188a279 2024-11-17T01:26:55,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/39d8d96c7a8341d4af240856c188a279, entries=200, sequenceid=484, filesize=14.4 K 2024-11-17T01:26:55,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/43a9564e989848079f37c75b3e2cd502 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/43a9564e989848079f37c75b3e2cd502 2024-11-17T01:26:55,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/43a9564e989848079f37c75b3e2cd502, entries=150, sequenceid=484, filesize=12.0 K 2024-11-17T01:26:55,780 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/d875c9f1ef0044e89baa860f6d684baa as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d875c9f1ef0044e89baa860f6d684baa 2024-11-17T01:26:55,785 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d875c9f1ef0044e89baa860f6d684baa, entries=150, sequenceid=484, filesize=12.0 K 2024-11-17T01:26:55,786 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 733401ed1ccb71c159e3f227c30cedc7 in 1291ms, sequenceid=484, compaction requested=true 2024-11-17T01:26:55,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:55,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:55,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:55,786 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:55,786 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:55,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:55,786 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:55,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:55,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:55,787 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37989 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:55,787 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:55,787 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
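The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region server refusing writes while the region's memstore sits above its blocking threshold; the unusually small 512 K limit suggests the test deliberately shrinks the flush size so that write pressure, flushes, and compactions interleave. A minimal sketch of the relevant settings, assuming the stock HBase configuration keys (the class name and the concrete values are illustrative, not taken from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches ~128 KB (illustrative value).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        // Writes are rejected with RegionTooBusyException once the memstore exceeds
        // multiplier * flush size (4 * 128 KB = 512 KB, consistent with the limit logged above).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      }
    }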
2024-11-17T01:26:55,788 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/015345933d034a488015837d033c4f40, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d63c5a30d6d5407085be0fe736963a1f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/39d8d96c7a8341d4af240856c188a279] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=37.1 K 2024-11-17T01:26:55,788 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:55,788 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:55,788 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:55,788 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e296e700584344b3bf488448a7d26a7e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/44f07afcc6ac4ad5ab2932879a81310b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/43a9564e989848079f37c75b3e2cd502] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=34.7 K 2024-11-17T01:26:55,788 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e296e700584344b3bf488448a7d26a7e, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1731806812179 2024-11-17T01:26:55,788 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 015345933d034a488015837d033c4f40, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1731806812179 2024-11-17T01:26:55,789 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting d63c5a30d6d5407085be0fe736963a1f, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=459, earliestPutTs=1731806812208 2024-11-17T01:26:55,789 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 44f07afcc6ac4ad5ab2932879a81310b, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=459, earliestPutTs=1731806812208 2024-11-17T01:26:55,789 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 43a9564e989848079f37c75b3e2cd502, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1731806813354 2024-11-17T01:26:55,789 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39d8d96c7a8341d4af240856c188a279, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1731806813354 2024-11-17T01:26:55,797 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#105 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:55,797 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/5291758a84c041bca27b64f421b28b61 is 50, key is test_row_0/B:col10/1731806813354/Put/seqid=0 2024-11-17T01:26:55,808 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#106 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:55,809 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/89a6808aeca64d6eadbfb6c22e879d95 is 50, key is test_row_0/A:col10/1731806813354/Put/seqid=0 2024-11-17T01:26:55,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741944_1120 (size=13493) 2024-11-17T01:26:55,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741945_1121 (size=13493) 2024-11-17T01:26:55,825 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/5291758a84c041bca27b64f421b28b61 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/5291758a84c041bca27b64f421b28b61 2024-11-17T01:26:55,828 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/89a6808aeca64d6eadbfb6c22e879d95 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89a6808aeca64d6eadbfb6c22e879d95 2024-11-17T01:26:55,832 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into 5291758a84c041bca27b64f421b28b61(size=13.2 K), total size for store is 13.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:55,832 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:55,832 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806815786; duration=0sec 2024-11-17T01:26:55,833 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:55,833 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:55,833 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:55,835 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35549 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:55,835 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:55,835 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:55,835 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a18bbfbb048e42dc97d3e358f23863c0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/49b785e4f9774c59ae04aefee86fd1a7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d875c9f1ef0044e89baa860f6d684baa] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=34.7 K 2024-11-17T01:26:55,835 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 89a6808aeca64d6eadbfb6c22e879d95(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
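While the flushes and compactions above run, the writer threads that receive RegionTooBusyException keep retrying; the client.RpcRetryingCallerImpl entry further down ("tries=8, retries=16, started=18257 ms ago") shows that retry loop at work against row 'test_row_1'. A hedged sketch of such a writer using the standard client API and retry settings (class name and values are illustrative; the actual configuration used by AcidGuaranteesTestTool is not visible in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Standard client retry knobs; values chosen only to mirror the log output.
        conf.setInt("hbase.client.retries.number", 16);  // "retries=16" in the retry trace
        conf.setLong("hbase.client.pause", 100);         // base backoff between attempts, in ms
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // A RegionTooBusyException from the server is retried internally by
          // RpcRetryingCallerImpl until the retry budget is exhausted.
          table.put(put);
        }
      }
    }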
2024-11-17T01:26:55,836 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:55,836 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806815786; duration=0sec 2024-11-17T01:26:55,836 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:55,836 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:55,836 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a18bbfbb048e42dc97d3e358f23863c0, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1731806812179 2024-11-17T01:26:55,836 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 49b785e4f9774c59ae04aefee86fd1a7, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=459, earliestPutTs=1731806812208 2024-11-17T01:26:55,837 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting d875c9f1ef0044e89baa860f6d684baa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1731806813354 2024-11-17T01:26:55,845 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#107 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:55,845 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/767fd4487c0345039da04174e00ccb72 is 50, key is test_row_0/C:col10/1731806813354/Put/seqid=0 2024-11-17T01:26:55,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741946_1122 (size=13493) 2024-11-17T01:26:56,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-17T01:26:56,091 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-17T01:26:56,092 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:26:56,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-17T01:26:56,093 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:26:56,093 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:26:56,094 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:26:56,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-17T01:26:56,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-17T01:26:56,245 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:56,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-17T01:26:56,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
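The FLUSH procedures recorded above (procId 24 completed, pid=26 stored as a FlushTableProcedure with a FlushRegionProcedure subprocedure) are driven by the test client asking the master to flush the table. A minimal sketch of issuing that request through the Admin API, assuming the synchronous Admin.flush call is what the test wraps (class name is made up for the sketch):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TableFlushSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table (the
          // "Operation: FLUSH, Table Name: default:TestAcidGuarantees" lines above)
          // and waits for the procedure to complete.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }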
2024-11-17T01:26:56,246 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:26:56,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:56,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:56,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:56,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:56,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:56,247 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:56,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/98692fa57d38434497076e4236ca7bf9 is 50, key is test_row_0/A:col10/1731806814509/Put/seqid=0 2024-11-17T01:26:56,257 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/767fd4487c0345039da04174e00ccb72 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/767fd4487c0345039da04174e00ccb72 2024-11-17T01:26:56,262 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into 767fd4487c0345039da04174e00ccb72(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:56,262 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:56,262 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806815786; duration=0sec 2024-11-17T01:26:56,263 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:56,263 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:56,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741947_1123 (size=12301) 2024-11-17T01:26:56,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-17T01:26:56,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:56,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:56,677 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/98692fa57d38434497076e4236ca7bf9 2024-11-17T01:26:56,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4845cf6828934250ae5120971a1d989e is 50, key is test_row_0/B:col10/1731806814509/Put/seqid=0 2024-11-17T01:26:56,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-17T01:26:56,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741948_1124 (size=12301) 2024-11-17T01:26:56,717 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:56,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 509 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806876716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:56,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:56,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 511 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806876819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:57,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:57,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 513 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806877021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:57,099 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4845cf6828934250ae5120971a1d989e 2024-11-17T01:26:57,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/1058097916c84a3dbd1cf1c66d6aa094 is 50, key is test_row_0/C:col10/1731806814509/Put/seqid=0 2024-11-17T01:26:57,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741949_1125 (size=12301) 2024-11-17T01:26:57,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-17T01:26:57,326 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:57,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 515 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806877324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:57,512 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=497 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/1058097916c84a3dbd1cf1c66d6aa094 2024-11-17T01:26:57,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/98692fa57d38434497076e4236ca7bf9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/98692fa57d38434497076e4236ca7bf9 2024-11-17T01:26:57,523 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/98692fa57d38434497076e4236ca7bf9, entries=150, sequenceid=497, filesize=12.0 K 2024-11-17T01:26:57,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/4845cf6828934250ae5120971a1d989e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4845cf6828934250ae5120971a1d989e 2024-11-17T01:26:57,529 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4845cf6828934250ae5120971a1d989e, entries=150, sequenceid=497, filesize=12.0 K 2024-11-17T01:26:57,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/1058097916c84a3dbd1cf1c66d6aa094 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1058097916c84a3dbd1cf1c66d6aa094 2024-11-17T01:26:57,536 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1058097916c84a3dbd1cf1c66d6aa094, entries=150, sequenceid=497, filesize=12.0 K 2024-11-17T01:26:57,537 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 733401ed1ccb71c159e3f227c30cedc7 in 1291ms, sequenceid=497, compaction requested=false 2024-11-17T01:26:57,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:57,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:57,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-17T01:26:57,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-17T01:26:57,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-17T01:26:57,540 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4450 sec 2024-11-17T01:26:57,541 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.4480 sec 2024-11-17T01:26:57,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:57,831 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-17T01:26:57,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:57,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:57,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:57,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:57,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:57,832 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:57,837 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/7a264f98e8f9459e8cb326f6d51d5a3b is 50, key is test_row_0/A:col10/1731806816705/Put/seqid=0 2024-11-17T01:26:57,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741950_1126 (size=12301) 2024-11-17T01:26:57,842 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/7a264f98e8f9459e8cb326f6d51d5a3b 2024-11-17T01:26:57,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/85456b6bc35649d59038380649dc35f7 is 50, key is test_row_0/B:col10/1731806816705/Put/seqid=0 2024-11-17T01:26:57,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:57,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 525 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806877851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:57,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741951_1127 (size=12301) 2024-11-17T01:26:57,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/85456b6bc35649d59038380649dc35f7 2024-11-17T01:26:57,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/7b76d3fd523b4a57ac0b0af405313637 is 50, key is test_row_0/C:col10/1731806816705/Put/seqid=0 2024-11-17T01:26:57,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741952_1128 (size=12301) 2024-11-17T01:26:57,958 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:57,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 527 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806877955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 529 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806878160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-17T01:26:58,199 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-17T01:26:58,200 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:26:58,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-17T01:26:58,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-17T01:26:58,202 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:26:58,202 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:26:58,202 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:26:58,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53582 deadline: 1731806878256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,259 DEBUG [Thread-153 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18257 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:58,274 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=524 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/7b76d3fd523b4a57ac0b0af405313637 2024-11-17T01:26:58,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/7a264f98e8f9459e8cb326f6d51d5a3b as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7a264f98e8f9459e8cb326f6d51d5a3b 2024-11-17T01:26:58,284 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7a264f98e8f9459e8cb326f6d51d5a3b, entries=150, sequenceid=524, filesize=12.0 K 2024-11-17T01:26:58,284 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/85456b6bc35649d59038380649dc35f7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/85456b6bc35649d59038380649dc35f7 2024-11-17T01:26:58,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53600 deadline: 1731806878285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,287 DEBUG [Thread-149 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18282 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:26:58,289 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/85456b6bc35649d59038380649dc35f7, entries=150, sequenceid=524, filesize=12.0 K 2024-11-17T01:26:58,290 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/7b76d3fd523b4a57ac0b0af405313637 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7b76d3fd523b4a57ac0b0af405313637 2024-11-17T01:26:58,295 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7b76d3fd523b4a57ac0b0af405313637, entries=150, sequenceid=524, filesize=12.0 K 2024-11-17T01:26:58,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 733401ed1ccb71c159e3f227c30cedc7 in 465ms, sequenceid=524, compaction requested=true 2024-11-17T01:26:58,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:58,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:26:58,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:58,296 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:58,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:26:58,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:58,297 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:58,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:26:58,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:58,298 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:58,298 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:58,298 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:26:58,298 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:26:58,298 INFO 
[RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,298 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,298 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89a6808aeca64d6eadbfb6c22e879d95, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/98692fa57d38434497076e4236ca7bf9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7a264f98e8f9459e8cb326f6d51d5a3b] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=37.2 K 2024-11-17T01:26:58,298 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/5291758a84c041bca27b64f421b28b61, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4845cf6828934250ae5120971a1d989e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/85456b6bc35649d59038380649dc35f7] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=37.2 K 2024-11-17T01:26:58,299 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 5291758a84c041bca27b64f421b28b61, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1731806813354 2024-11-17T01:26:58,299 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89a6808aeca64d6eadbfb6c22e879d95, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1731806813354 2024-11-17T01:26:58,299 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4845cf6828934250ae5120971a1d989e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1731806814509 2024-11-17T01:26:58,299 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98692fa57d38434497076e4236ca7bf9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1731806814509 2024-11-17T01:26:58,299 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 85456b6bc35649d59038380649dc35f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1731806816705 2024-11-17T01:26:58,299 DEBUG 
[RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a264f98e8f9459e8cb326f6d51d5a3b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1731806816705 2024-11-17T01:26:58,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-17T01:26:58,307 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#114 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:58,308 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#115 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:58,308 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/c01f3f584e484d01925234c4f4a91176 is 50, key is test_row_0/B:col10/1731806816705/Put/seqid=0 2024-11-17T01:26:58,309 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/8a5be92e92b243fd93e32e7924c9011a is 50, key is test_row_0/A:col10/1731806816705/Put/seqid=0 2024-11-17T01:26:58,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741954_1130 (size=13595) 2024-11-17T01:26:58,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741953_1129 (size=13595) 2024-11-17T01:26:58,327 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/8a5be92e92b243fd93e32e7924c9011a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/8a5be92e92b243fd93e32e7924c9011a 2024-11-17T01:26:58,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:58,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-17T01:26:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): 
Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:58,335 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 8a5be92e92b243fd93e32e7924c9011a(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:26:58,336 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:58,336 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=13, startTime=1731806818296; duration=0sec 2024-11-17T01:26:58,336 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:26:58,336 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:26:58,336 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:26:58,337 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38095 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:26:58,338 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:26:58,338 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
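[Editorial aside, not part of the captured log: the flush and minor-compaction activity recorded above (MemStoreFlusher writing store files for A/B/C, ExploringCompactionPolicy selecting three files, HStore starting compaction) is the server acting on its own triggers, but the same operations can also be requested through the public Admin API. The following is only an illustrative sketch under that assumption; the connection setup is generic and only the table name "TestAcidGuarantees" is taken from the log.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);   // asks region servers to flush memstores, comparable to the MemStoreFlusher work above
      admin.compact(table); // requests a (minor) compaction like the ones the CompactSplit thread selects above
    }
  }
}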
2024-11-17T01:26:58,338 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/767fd4487c0345039da04174e00ccb72, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1058097916c84a3dbd1cf1c66d6aa094, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7b76d3fd523b4a57ac0b0af405313637] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=37.2 K 2024-11-17T01:26:58,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/a85e592dddea40beb3dc9a82dee7efc6 is 50, key is test_row_0/A:col10/1731806817848/Put/seqid=0 2024-11-17T01:26:58,338 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 767fd4487c0345039da04174e00ccb72, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=484, earliestPutTs=1731806813354 2024-11-17T01:26:58,339 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1058097916c84a3dbd1cf1c66d6aa094, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=497, earliestPutTs=1731806814509 2024-11-17T01:26:58,340 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b76d3fd523b4a57ac0b0af405313637, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1731806816705 2024-11-17T01:26:58,353 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#117 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:26:58,353 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/074fdee9562040e0984e0b88bb0fd828 is 50, key is test_row_0/C:col10/1731806816705/Put/seqid=0 2024-11-17T01:26:58,354 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,354 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-17T01:26:58,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:26:58,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:58,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,355 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:58,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:58,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:58,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741955_1131 (size=12301) 2024-11-17T01:26:58,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=536 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/a85e592dddea40beb3dc9a82dee7efc6 2024-11-17T01:26:58,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/0cd8d0b583fa415aad64b4c8fbc17b67 is 50, key is test_row_0/B:col10/1731806817848/Put/seqid=0 2024-11-17T01:26:58,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741956_1132 (size=13595) 2024-11-17T01:26:58,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741957_1133 (size=12301) 2024-11-17T01:26:58,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806878417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806878419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 531 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806878463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-17T01:26:58,508 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-17T01:26:58,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:58,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,508 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
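[Editorial aside, not part of the captured log: the "Over memstore limit=512.0 K" threshold in the RegionTooBusyException warnings above is, in stock HBase, conventionally the configured memstore flush size multiplied by the memstore block multiplier (hbase.hregion.memstore.flush.size x hbase.hregion.memstore.block.multiplier); the unusually small 512 K figure suggests a test-scoped flush size. The sketch below only illustrates that arithmetic; the concrete values are assumptions chosen to reproduce 512 K, not settings read from this run.]

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed test-scoped values; production defaults are far larger (128 MB flush size, multiplier 4).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 K (assumption)
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 K * 4 = 524288 bytes = 512.0 K, the limit quoted in the warnings above.
    System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
  }
}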
2024-11-17T01:26:58,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:58,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:58,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806878521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806878525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,661 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-17T01:26:58,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:58,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,662 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:58,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:58,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:58,727 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/c01f3f584e484d01925234c4f4a91176 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c01f3f584e484d01925234c4f4a91176 2024-11-17T01:26:58,728 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806878726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,733 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into c01f3f584e484d01925234c4f4a91176(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:58,733 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:58,733 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=13, startTime=1731806818296; duration=0sec 2024-11-17T01:26:58,733 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:58,733 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:26:58,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806878730, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,784 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/074fdee9562040e0984e0b88bb0fd828 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/074fdee9562040e0984e0b88bb0fd828 2024-11-17T01:26:58,785 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=536 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/0cd8d0b583fa415aad64b4c8fbc17b67 2024-11-17T01:26:58,789 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into 074fdee9562040e0984e0b88bb0fd828(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:26:58,789 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:58,789 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=13, startTime=1731806818297; duration=0sec 2024-11-17T01:26:58,790 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:26:58,790 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:26:58,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/e79513d0ad05487c8803b3dd043f549c is 50, key is test_row_0/C:col10/1731806817848/Put/seqid=0 2024-11-17T01:26:58,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741958_1134 (size=12301) 2024-11-17T01:26:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-17T01:26:58,815 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,815 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-17T01:26:58,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:58,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,815 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:58,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:58,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:58,969 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:58,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 533 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806878968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,974 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:58,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-17T01:26:58,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:58,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:58,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:58,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:58,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:59,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:59,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806879030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:59,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806879035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,127 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-17T01:26:59,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:59,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:59,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:59,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:26:59,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:59,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:26:59,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=536 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/e79513d0ad05487c8803b3dd043f549c 2024-11-17T01:26:59,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/a85e592dddea40beb3dc9a82dee7efc6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a85e592dddea40beb3dc9a82dee7efc6 2024-11-17T01:26:59,206 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a85e592dddea40beb3dc9a82dee7efc6, entries=150, sequenceid=536, filesize=12.0 K 2024-11-17T01:26:59,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/0cd8d0b583fa415aad64b4c8fbc17b67 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/0cd8d0b583fa415aad64b4c8fbc17b67 2024-11-17T01:26:59,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/0cd8d0b583fa415aad64b4c8fbc17b67, entries=150, sequenceid=536, filesize=12.0 K 2024-11-17T01:26:59,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/e79513d0ad05487c8803b3dd043f549c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e79513d0ad05487c8803b3dd043f549c 2024-11-17T01:26:59,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e79513d0ad05487c8803b3dd043f549c, entries=150, sequenceid=536, filesize=12.0 K 2024-11-17T01:26:59,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 733401ed1ccb71c159e3f227c30cedc7 in 884ms, sequenceid=536, compaction requested=false 2024-11-17T01:26:59,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:26:59,280 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-17T01:26:59,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:26:59,281 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-17T01:26:59,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:26:59,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:59,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:26:59,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:59,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:26:59,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:26:59,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/7f0edf0118f245899d7ea4fc89e986ab is 50, key is test_row_0/A:col10/1731806818417/Put/seqid=0 2024-11-17T01:26:59,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741959_1135 (size=12301) 2024-11-17T01:26:59,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-17T01:26:59,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. as already flushing 2024-11-17T01:26:59,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:26:59,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:59,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806879547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:59,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806879551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,654 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:59,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806879652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:59,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806879653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,691 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/7f0edf0118f245899d7ea4fc89e986ab 2024-11-17T01:26:59,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/a3db68f8bc5e4e56903291654963342a is 50, key is test_row_0/B:col10/1731806818417/Put/seqid=0 2024-11-17T01:26:59,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741960_1136 (size=12301) 2024-11-17T01:26:59,791 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:63898 2024-11-17T01:26:59,791 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b5f27aa to 127.0.0.1:63898 2024-11-17T01:26:59,791 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:63898 2024-11-17T01:26:59,791 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:26:59,791 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:26:59,791 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:26:59,796 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:63898 2024-11-17T01:26:59,796 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:26:59,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806879856, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:59,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806879857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:26:59,949 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T01:26:59,974 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:26:59,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 535 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53616 deadline: 1731806879974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:00,105 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/a3db68f8bc5e4e56903291654963342a 2024-11-17T01:27:00,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/75db64e98b0049249808a0b9e77bfe2d is 50, key is test_row_0/C:col10/1731806818417/Put/seqid=0 2024-11-17T01:27:00,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741961_1137 (size=12301) 2024-11-17T01:27:00,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:00,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:00,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53644 deadline: 1731806880159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:00,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:53586 deadline: 1731806880159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:00,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-17T01:27:00,524 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/75db64e98b0049249808a0b9e77bfe2d 2024-11-17T01:27:00,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 
{event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/7f0edf0118f245899d7ea4fc89e986ab as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7f0edf0118f245899d7ea4fc89e986ab 2024-11-17T01:27:00,542 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7f0edf0118f245899d7ea4fc89e986ab, entries=150, sequenceid=563, filesize=12.0 K 2024-11-17T01:27:00,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/a3db68f8bc5e4e56903291654963342a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a3db68f8bc5e4e56903291654963342a 2024-11-17T01:27:00,550 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a3db68f8bc5e4e56903291654963342a, entries=150, sequenceid=563, filesize=12.0 K 2024-11-17T01:27:00,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/75db64e98b0049249808a0b9e77bfe2d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/75db64e98b0049249808a0b9e77bfe2d 2024-11-17T01:27:00,557 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/75db64e98b0049249808a0b9e77bfe2d, entries=150, sequenceid=563, filesize=12.0 K 2024-11-17T01:27:00,558 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 733401ed1ccb71c159e3f227c30cedc7 in 1277ms, sequenceid=563, compaction requested=true 2024-11-17T01:27:00,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:27:00,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:27:00,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-17T01:27:00,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-17T01:27:00,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-17T01:27:00,561 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3570 sec 2024-11-17T01:27:00,563 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 2.3620 sec 2024-11-17T01:27:00,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:27:00,664 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-17T01:27:00,664 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x048068a5 to 127.0.0.1:63898 2024-11-17T01:27:00,664 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:63898 2024-11-17T01:27:00,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:27:00,664 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:27:00,664 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:27:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:27:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:27:00,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:00,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/b9c231d9632d4eb09895a7202e1df34f is 50, key is test_row_0/A:col10/1731806819543/Put/seqid=0 2024-11-17T01:27:00,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741962_1138 (size=12301) 2024-11-17T01:27:01,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/b9c231d9632d4eb09895a7202e1df34f 2024-11-17T01:27:01,092 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/9f691e1c3d7f4771bf92bd3545beaff4 is 50, key is test_row_0/B:col10/1731806819543/Put/seqid=0 2024-11-17T01:27:01,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741963_1139 (size=12301) 2024-11-17T01:27:01,498 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/9f691e1c3d7f4771bf92bd3545beaff4 2024-11-17T01:27:01,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/fbc38c9c493944e78e9d55ab9654da7c is 50, key is test_row_0/C:col10/1731806819543/Put/seqid=0 2024-11-17T01:27:01,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741964_1140 (size=12301) 2024-11-17T01:27:01,917 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=577 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/fbc38c9c493944e78e9d55ab9654da7c 2024-11-17T01:27:01,928 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/b9c231d9632d4eb09895a7202e1df34f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b9c231d9632d4eb09895a7202e1df34f 2024-11-17T01:27:01,933 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b9c231d9632d4eb09895a7202e1df34f, entries=150, sequenceid=577, filesize=12.0 K 2024-11-17T01:27:01,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/9f691e1c3d7f4771bf92bd3545beaff4 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9f691e1c3d7f4771bf92bd3545beaff4 2024-11-17T01:27:01,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9f691e1c3d7f4771bf92bd3545beaff4, entries=150, sequenceid=577, filesize=12.0 K 2024-11-17T01:27:01,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/fbc38c9c493944e78e9d55ab9654da7c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fbc38c9c493944e78e9d55ab9654da7c 2024-11-17T01:27:01,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fbc38c9c493944e78e9d55ab9654da7c, entries=150, sequenceid=577, filesize=12.0 K 2024-11-17T01:27:01,945 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=0 B/0 for 733401ed1ccb71c159e3f227c30cedc7 in 1281ms, sequenceid=577, compaction requested=true 2024-11-17T01:27:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:27:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:01,946 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:01,946 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 733401ed1ccb71c159e3f227c30cedc7:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:01,946 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:01,947 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50498 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:01,947 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50498 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:01,947 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/A is initiating minor compaction (all files) 2024-11-17T01:27:01,947 DEBUG 
[RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/B is initiating minor compaction (all files) 2024-11-17T01:27:01,947 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/A in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:27:01,947 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/B in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:27:01,947 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/8a5be92e92b243fd93e32e7924c9011a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a85e592dddea40beb3dc9a82dee7efc6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7f0edf0118f245899d7ea4fc89e986ab, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b9c231d9632d4eb09895a7202e1df34f] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=49.3 K 2024-11-17T01:27:01,947 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c01f3f584e484d01925234c4f4a91176, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/0cd8d0b583fa415aad64b4c8fbc17b67, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a3db68f8bc5e4e56903291654963342a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9f691e1c3d7f4771bf92bd3545beaff4] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=49.3 K 2024-11-17T01:27:01,948 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a5be92e92b243fd93e32e7924c9011a, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1731806816705 2024-11-17T01:27:01,948 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c01f3f584e484d01925234c4f4a91176, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1731806816705 2024-11-17T01:27:01,948 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting a85e592dddea40beb3dc9a82dee7efc6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=536, earliestPutTs=1731806817848 2024-11-17T01:27:01,948 DEBUG 
[RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0cd8d0b583fa415aad64b4c8fbc17b67, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=536, earliestPutTs=1731806817848 2024-11-17T01:27:01,948 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f0edf0118f245899d7ea4fc89e986ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=563, earliestPutTs=1731806818414 2024-11-17T01:27:01,948 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a3db68f8bc5e4e56903291654963342a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=563, earliestPutTs=1731806818414 2024-11-17T01:27:01,949 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting b9c231d9632d4eb09895a7202e1df34f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1731806819543 2024-11-17T01:27:01,949 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f691e1c3d7f4771bf92bd3545beaff4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1731806819543 2024-11-17T01:27:01,957 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#A#compaction#126 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:01,958 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#B#compaction#127 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:01,958 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/19cc63c5db2541d2af3cdc556235dd1a is 50, key is test_row_0/A:col10/1731806819543/Put/seqid=0 2024-11-17T01:27:01,958 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/98842a38c5c44e96bc9afe8734cfe4a8 is 50, key is test_row_0/B:col10/1731806819543/Put/seqid=0 2024-11-17T01:27:01,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741965_1141 (size=13731) 2024-11-17T01:27:01,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741966_1142 (size=13731) 2024-11-17T01:27:01,989 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:63898 2024-11-17T01:27:01,989 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:27:02,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-17T01:27:02,309 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-17T01:27:02,376 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/19cc63c5db2541d2af3cdc556235dd1a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/19cc63c5db2541d2af3cdc556235dd1a 2024-11-17T01:27:02,376 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/98842a38c5c44e96bc9afe8734cfe4a8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/98842a38c5c44e96bc9afe8734cfe4a8 2024-11-17T01:27:02,381 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/B of 733401ed1ccb71c159e3f227c30cedc7 into 98842a38c5c44e96bc9afe8734cfe4a8(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:02,381 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/A of 733401ed1ccb71c159e3f227c30cedc7 into 19cc63c5db2541d2af3cdc556235dd1a(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:02,381 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:27:02,381 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:27:02,382 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/B, priority=12, startTime=1731806821946; duration=0sec 2024-11-17T01:27:02,382 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/A, priority=12, startTime=1731806821946; duration=0sec 2024-11-17T01:27:02,382 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:02,382 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:B 2024-11-17T01:27:02,382 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:02,382 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:02,382 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:A 2024-11-17T01:27:02,383 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50498 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:02,383 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 733401ed1ccb71c159e3f227c30cedc7/C is initiating minor compaction (all files) 2024-11-17T01:27:02,383 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 733401ed1ccb71c159e3f227c30cedc7/C in TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:27:02,383 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/074fdee9562040e0984e0b88bb0fd828, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e79513d0ad05487c8803b3dd043f549c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/75db64e98b0049249808a0b9e77bfe2d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fbc38c9c493944e78e9d55ab9654da7c] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp, totalSize=49.3 K 2024-11-17T01:27:02,384 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 074fdee9562040e0984e0b88bb0fd828, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=524, earliestPutTs=1731806816705 2024-11-17T01:27:02,384 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e79513d0ad05487c8803b3dd043f549c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=536, earliestPutTs=1731806817848 2024-11-17T01:27:02,384 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 75db64e98b0049249808a0b9e77bfe2d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=563, earliestPutTs=1731806818414 2024-11-17T01:27:02,384 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting fbc38c9c493944e78e9d55ab9654da7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=577, earliestPutTs=1731806819543 2024-11-17T01:27:02,393 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 733401ed1ccb71c159e3f227c30cedc7#C#compaction#128 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:02,393 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4d4151e66f5042eeab9491e95637b3f5 is 50, key is test_row_0/C:col10/1731806819543/Put/seqid=0 2024-11-17T01:27:02,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741967_1143 (size=13731) 2024-11-17T01:27:02,810 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/4d4151e66f5042eeab9491e95637b3f5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4d4151e66f5042eeab9491e95637b3f5 2024-11-17T01:27:02,816 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 733401ed1ccb71c159e3f227c30cedc7/C of 733401ed1ccb71c159e3f227c30cedc7 into 4d4151e66f5042eeab9491e95637b3f5(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:02,816 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:27:02,816 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7., storeName=733401ed1ccb71c159e3f227c30cedc7/C, priority=12, startTime=1731806821946; duration=0sec 2024-11-17T01:27:02,817 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:02,817 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 733401ed1ccb71c159e3f227c30cedc7:C 2024-11-17T01:27:08,092 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-17T01:27:08,094 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-17T01:27:08,313 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1324ee83 to 127.0.0.1:63898 2024-11-17T01:27:08,313 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:27:08,383 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53b8a93e to 127.0.0.1:63898 2024-11-17T01:27:08,384 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:27:08,384 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-17T01:27:08,384 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 7 2024-11-17T01:27:08,384 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 393 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 8 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 21 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 24 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6790 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7073 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2954 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8862 rows 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2959 2024-11-17T01:27:08,385 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 8869 rows 2024-11-17T01:27:08,385 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-17T01:27:08,386 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63607639 to 127.0.0.1:63898 2024-11-17T01:27:08,386 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:27:08,391 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-17T01:27:08,395 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-17T01:27:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:08,402 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806828401"}]},"ts":"1731806828401"} 2024-11-17T01:27:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-17T01:27:08,403 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-17T01:27:08,415 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-17T01:27:08,417 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-17T01:27:08,423 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=733401ed1ccb71c159e3f227c30cedc7, UNASSIGN}] 2024-11-17T01:27:08,424 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=733401ed1ccb71c159e3f227c30cedc7, UNASSIGN 2024-11-17T01:27:08,425 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=733401ed1ccb71c159e3f227c30cedc7, regionState=CLOSING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:08,426 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-17T01:27:08,426 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure 733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:27:08,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-17T01:27:08,582 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:08,583 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:27:08,583 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing 733401ed1ccb71c159e3f227c30cedc7, disabling compactions & flushes 2024-11-17T01:27:08,584 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. after waiting 0 ms 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:27:08,584 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(2837): Flushing 733401ed1ccb71c159e3f227c30cedc7 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=A 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=B 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 733401ed1ccb71c159e3f227c30cedc7, store=C 2024-11-17T01:27:08,584 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:08,588 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/a2f864b87a874862a8da27a97d0a8b52 is 50, key is test_row_1/A:col10/1731806828309/Put/seqid=0 2024-11-17T01:27:08,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741968_1144 (size=9857) 2024-11-17T01:27:08,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-17T01:27:08,994 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/a2f864b87a874862a8da27a97d0a8b52 2024-11-17T01:27:09,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-17T01:27:09,010 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/71cfc386a6c3444ab362eb1cf15a2cc2 is 50, key is test_row_1/B:col10/1731806828309/Put/seqid=0 2024-11-17T01:27:09,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741969_1145 (size=9857) 2024-11-17T01:27:09,416 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 
{event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/71cfc386a6c3444ab362eb1cf15a2cc2 2024-11-17T01:27:09,431 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/81e23eb45d1a4d6fb244109cada60a9c is 50, key is test_row_1/C:col10/1731806828309/Put/seqid=0 2024-11-17T01:27:09,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741970_1146 (size=9857) 2024-11-17T01:27:09,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-17T01:27:09,837 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/81e23eb45d1a4d6fb244109cada60a9c 2024-11-17T01:27:09,850 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/A/a2f864b87a874862a8da27a97d0a8b52 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a2f864b87a874862a8da27a97d0a8b52 2024-11-17T01:27:09,857 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a2f864b87a874862a8da27a97d0a8b52, entries=100, sequenceid=586, filesize=9.6 K 2024-11-17T01:27:09,858 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/B/71cfc386a6c3444ab362eb1cf15a2cc2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/71cfc386a6c3444ab362eb1cf15a2cc2 2024-11-17T01:27:09,863 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/71cfc386a6c3444ab362eb1cf15a2cc2, entries=100, sequenceid=586, filesize=9.6 K 2024-11-17T01:27:09,864 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/.tmp/C/81e23eb45d1a4d6fb244109cada60a9c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/81e23eb45d1a4d6fb244109cada60a9c 2024-11-17T01:27:09,868 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/81e23eb45d1a4d6fb244109cada60a9c, entries=100, sequenceid=586, filesize=9.6 K 2024-11-17T01:27:09,869 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 733401ed1ccb71c159e3f227c30cedc7 in 1285ms, sequenceid=586, compaction requested=false 2024-11-17T01:27:09,869 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a89265a5b4904c84b4b9f85bfa7e4a6f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/da8dcf6fb30c4b2097072b9fda58cc05, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/29f67f76eab34cc494388e8d733fac83, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4117b278a9b74fbcb6876ca0711d11f0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f7daf7d338ad4d19b9b1b30912a852de, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f531c77b63049e0b8cf67a8879bef99, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/72375699d99a40a484fa23f79d7eb28b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/49e1e069455349cd9a555ae5c0408d4a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f3d876e5876d4da2bc916207ec739e0c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4be2eafebad14c00b35e2541fdf2fa9c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d9f0e9ad46c544e2bf5e2b5dbdcca9fb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/fa5666865edf41af84c35c43a238a165, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/28b84e8d41104576b0e3cca38fc6397f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/104396edf912489a8036fa7332f2da01, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/6f31e57a3b09404980f6d98f64589e7e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89b2a0687c4f404da53e51cb51aeb751, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/91e358e5e96e4894a5c4bc8176179008, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/1c5318a31bea418fa9e0deb7cbd6618a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f5bdfcd40fa42fdb21fe863eee83781, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/21b4b5530fac4d3e92a9d61b12366520, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/5a1ad0cebbd7422eb06620e9a154a55d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/af6e485ddece409eb54064b9ca96e92f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7cf8be94018f4526885476ed92fb582c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b980786db294440a91660afde9d8d87c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc50fed2d4954c23913f3d60d8c20bb8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/ca932dc980294ac3ac5a02ec9374e439, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/80de5c968863443bbe74e3afba402841, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/c22e7226e8e24bc8956105e2e1012bde, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4b6eff32b4c94167b17f34c0ec347940, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc1e8b757c8d49dfad15efc0c62251bf, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/227d302e873048eca534a261d1fe8465, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/14430deec54242b298d4ef8e983b5df3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/015345933d034a488015837d033c4f40, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d63c5a30d6d5407085be0fe736963a1f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/39d8d96c7a8341d4af240856c188a279, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89a6808aeca64d6eadbfb6c22e879d95, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/98692fa57d38434497076e4236ca7bf9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/8a5be92e92b243fd93e32e7924c9011a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7a264f98e8f9459e8cb326f6d51d5a3b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a85e592dddea40beb3dc9a82dee7efc6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7f0edf0118f245899d7ea4fc89e986ab, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b9c231d9632d4eb09895a7202e1df34f] to archive 2024-11-17T01:27:09,873 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:27:09,878 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a89265a5b4904c84b4b9f85bfa7e4a6f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a89265a5b4904c84b4b9f85bfa7e4a6f 2024-11-17T01:27:09,879 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/da8dcf6fb30c4b2097072b9fda58cc05 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/da8dcf6fb30c4b2097072b9fda58cc05 2024-11-17T01:27:09,881 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/29f67f76eab34cc494388e8d733fac83 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/29f67f76eab34cc494388e8d733fac83 2024-11-17T01:27:09,882 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4117b278a9b74fbcb6876ca0711d11f0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4117b278a9b74fbcb6876ca0711d11f0 2024-11-17T01:27:09,883 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f7daf7d338ad4d19b9b1b30912a852de to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f7daf7d338ad4d19b9b1b30912a852de 2024-11-17T01:27:09,885 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f531c77b63049e0b8cf67a8879bef99 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f531c77b63049e0b8cf67a8879bef99 2024-11-17T01:27:09,886 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/72375699d99a40a484fa23f79d7eb28b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/72375699d99a40a484fa23f79d7eb28b 2024-11-17T01:27:09,887 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/49e1e069455349cd9a555ae5c0408d4a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/49e1e069455349cd9a555ae5c0408d4a 2024-11-17T01:27:09,888 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f3d876e5876d4da2bc916207ec739e0c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/f3d876e5876d4da2bc916207ec739e0c 2024-11-17T01:27:09,889 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4be2eafebad14c00b35e2541fdf2fa9c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4be2eafebad14c00b35e2541fdf2fa9c 2024-11-17T01:27:09,890 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d9f0e9ad46c544e2bf5e2b5dbdcca9fb to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d9f0e9ad46c544e2bf5e2b5dbdcca9fb 2024-11-17T01:27:09,892 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/fa5666865edf41af84c35c43a238a165 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/fa5666865edf41af84c35c43a238a165 2024-11-17T01:27:09,892 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/28b84e8d41104576b0e3cca38fc6397f to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/28b84e8d41104576b0e3cca38fc6397f 2024-11-17T01:27:09,893 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/104396edf912489a8036fa7332f2da01 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/104396edf912489a8036fa7332f2da01 2024-11-17T01:27:09,894 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/6f31e57a3b09404980f6d98f64589e7e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/6f31e57a3b09404980f6d98f64589e7e 2024-11-17T01:27:09,895 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89b2a0687c4f404da53e51cb51aeb751 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89b2a0687c4f404da53e51cb51aeb751 2024-11-17T01:27:09,896 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/91e358e5e96e4894a5c4bc8176179008 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/91e358e5e96e4894a5c4bc8176179008 2024-11-17T01:27:09,897 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/1c5318a31bea418fa9e0deb7cbd6618a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/1c5318a31bea418fa9e0deb7cbd6618a 2024-11-17T01:27:09,898 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f5bdfcd40fa42fdb21fe863eee83781 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/0f5bdfcd40fa42fdb21fe863eee83781 2024-11-17T01:27:09,899 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/21b4b5530fac4d3e92a9d61b12366520 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/21b4b5530fac4d3e92a9d61b12366520 2024-11-17T01:27:09,900 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/5a1ad0cebbd7422eb06620e9a154a55d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/5a1ad0cebbd7422eb06620e9a154a55d 2024-11-17T01:27:09,901 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/af6e485ddece409eb54064b9ca96e92f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/af6e485ddece409eb54064b9ca96e92f 2024-11-17T01:27:09,902 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7cf8be94018f4526885476ed92fb582c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7cf8be94018f4526885476ed92fb582c 2024-11-17T01:27:09,903 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b980786db294440a91660afde9d8d87c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b980786db294440a91660afde9d8d87c 2024-11-17T01:27:09,904 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc50fed2d4954c23913f3d60d8c20bb8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc50fed2d4954c23913f3d60d8c20bb8 2024-11-17T01:27:09,905 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/ca932dc980294ac3ac5a02ec9374e439 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/ca932dc980294ac3ac5a02ec9374e439 2024-11-17T01:27:09,906 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/80de5c968863443bbe74e3afba402841 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/80de5c968863443bbe74e3afba402841 2024-11-17T01:27:09,907 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/c22e7226e8e24bc8956105e2e1012bde to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/c22e7226e8e24bc8956105e2e1012bde 2024-11-17T01:27:09,908 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4b6eff32b4c94167b17f34c0ec347940 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/4b6eff32b4c94167b17f34c0ec347940 2024-11-17T01:27:09,909 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc1e8b757c8d49dfad15efc0c62251bf to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/bc1e8b757c8d49dfad15efc0c62251bf 2024-11-17T01:27:09,910 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/227d302e873048eca534a261d1fe8465 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/227d302e873048eca534a261d1fe8465 2024-11-17T01:27:09,911 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/14430deec54242b298d4ef8e983b5df3 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/14430deec54242b298d4ef8e983b5df3 2024-11-17T01:27:09,912 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/015345933d034a488015837d033c4f40 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/015345933d034a488015837d033c4f40 2024-11-17T01:27:09,913 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d63c5a30d6d5407085be0fe736963a1f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/d63c5a30d6d5407085be0fe736963a1f 2024-11-17T01:27:09,914 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/39d8d96c7a8341d4af240856c188a279 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/39d8d96c7a8341d4af240856c188a279 2024-11-17T01:27:09,915 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89a6808aeca64d6eadbfb6c22e879d95 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/89a6808aeca64d6eadbfb6c22e879d95 2024-11-17T01:27:09,916 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/98692fa57d38434497076e4236ca7bf9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/98692fa57d38434497076e4236ca7bf9 2024-11-17T01:27:09,917 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/8a5be92e92b243fd93e32e7924c9011a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/8a5be92e92b243fd93e32e7924c9011a 2024-11-17T01:27:09,919 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7a264f98e8f9459e8cb326f6d51d5a3b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7a264f98e8f9459e8cb326f6d51d5a3b 2024-11-17T01:27:09,920 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a85e592dddea40beb3dc9a82dee7efc6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a85e592dddea40beb3dc9a82dee7efc6 2024-11-17T01:27:09,921 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7f0edf0118f245899d7ea4fc89e986ab to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/7f0edf0118f245899d7ea4fc89e986ab 2024-11-17T01:27:09,922 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b9c231d9632d4eb09895a7202e1df34f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/b9c231d9632d4eb09895a7202e1df34f 2024-11-17T01:27:09,936 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/aa961b8b10ae499dad6bdca5860115bf, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/409f91929421410aa840d931f08f9c3a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e32d915eb34d45d2ac6ddc9e4132d5f6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/77115b882001454387d7e8c5af5b3cc6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/49919e7314cb4279aae6a2cb09e9b9b0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c9b111f31b5b4f5f8b0fa931035970e9, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/359d7bb6ab2c4d05bbc212172413ab4b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4c46fb48cd514f039fdead697463ad77, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/bd6a333c92c142f496e207d9a4dc7c67, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/47460f09425c46c0baa804855076f8ed, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c2eb10be350247d292edce7c7c6c3d44, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a94bd5312ddf423b9a69934c9cb73d24, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/46a2686437e44a1887064a82545b9b1c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9425521c046f4d8db6ca81c170a6b5b5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4fdbfacbf0554334b59df1346781dbe3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/cc86d93b26784249bc91a7c6d2e32e6c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e5d557bbe9af40c6be6c48706f299e67, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/57bc1d480ee44c2ca86550724923f399, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/fc4010d7b06f487a8dcf8127cad93ecb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/b990dc36d93d436cb58eed7a5acfdf80, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/02ec49fa90de4d548f9a8bddd04ee38e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/f7c559e690d24224ad4a9744c2a3112b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/784265df201c45e8addc1c4d625da8fd, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/38739dd541ac4a02a3aaf750d255f226, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9931f555f2f74de8ab25f5e79146e0d8, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e88b8d77a452434985a5a1d0b1a5480d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/902ecf25df7e4c20a81c081264877d5c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/1e6b15d13cc84f21a708de601506125c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4da2e44e940d4a818cc206e3ac65320b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/693b82f9040c40e08b25d23bde122c08, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/962c132baf7f493993dbb3d6fd27e63b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e296e700584344b3bf488448a7d26a7e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/469e877155704edc8deca95045652d9c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/44f07afcc6ac4ad5ab2932879a81310b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/5291758a84c041bca27b64f421b28b61, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/43a9564e989848079f37c75b3e2cd502, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4845cf6828934250ae5120971a1d989e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c01f3f584e484d01925234c4f4a91176, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/85456b6bc35649d59038380649dc35f7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/0cd8d0b583fa415aad64b4c8fbc17b67, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a3db68f8bc5e4e56903291654963342a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9f691e1c3d7f4771bf92bd3545beaff4] to archive 2024-11-17T01:27:09,937 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:27:09,939 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/aa961b8b10ae499dad6bdca5860115bf to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/aa961b8b10ae499dad6bdca5860115bf 2024-11-17T01:27:09,940 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/409f91929421410aa840d931f08f9c3a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/409f91929421410aa840d931f08f9c3a 2024-11-17T01:27:09,942 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e32d915eb34d45d2ac6ddc9e4132d5f6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e32d915eb34d45d2ac6ddc9e4132d5f6 2024-11-17T01:27:09,943 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/77115b882001454387d7e8c5af5b3cc6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/77115b882001454387d7e8c5af5b3cc6 2024-11-17T01:27:09,945 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/49919e7314cb4279aae6a2cb09e9b9b0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/49919e7314cb4279aae6a2cb09e9b9b0 2024-11-17T01:27:09,946 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c9b111f31b5b4f5f8b0fa931035970e9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c9b111f31b5b4f5f8b0fa931035970e9 2024-11-17T01:27:09,947 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/359d7bb6ab2c4d05bbc212172413ab4b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/359d7bb6ab2c4d05bbc212172413ab4b 2024-11-17T01:27:09,948 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4c46fb48cd514f039fdead697463ad77 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4c46fb48cd514f039fdead697463ad77 2024-11-17T01:27:09,949 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/bd6a333c92c142f496e207d9a4dc7c67 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/bd6a333c92c142f496e207d9a4dc7c67 2024-11-17T01:27:09,951 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/47460f09425c46c0baa804855076f8ed to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/47460f09425c46c0baa804855076f8ed 2024-11-17T01:27:09,952 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c2eb10be350247d292edce7c7c6c3d44 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c2eb10be350247d292edce7c7c6c3d44 2024-11-17T01:27:09,953 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a94bd5312ddf423b9a69934c9cb73d24 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a94bd5312ddf423b9a69934c9cb73d24 2024-11-17T01:27:09,955 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/46a2686437e44a1887064a82545b9b1c to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/46a2686437e44a1887064a82545b9b1c 2024-11-17T01:27:09,956 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9425521c046f4d8db6ca81c170a6b5b5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9425521c046f4d8db6ca81c170a6b5b5 2024-11-17T01:27:09,958 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4fdbfacbf0554334b59df1346781dbe3 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4fdbfacbf0554334b59df1346781dbe3 2024-11-17T01:27:09,959 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/cc86d93b26784249bc91a7c6d2e32e6c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/cc86d93b26784249bc91a7c6d2e32e6c 2024-11-17T01:27:09,960 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e5d557bbe9af40c6be6c48706f299e67 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e5d557bbe9af40c6be6c48706f299e67 2024-11-17T01:27:09,961 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/57bc1d480ee44c2ca86550724923f399 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/57bc1d480ee44c2ca86550724923f399 2024-11-17T01:27:09,962 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/fc4010d7b06f487a8dcf8127cad93ecb to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/fc4010d7b06f487a8dcf8127cad93ecb 2024-11-17T01:27:09,963 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/b990dc36d93d436cb58eed7a5acfdf80 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/b990dc36d93d436cb58eed7a5acfdf80 2024-11-17T01:27:09,964 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/02ec49fa90de4d548f9a8bddd04ee38e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/02ec49fa90de4d548f9a8bddd04ee38e 2024-11-17T01:27:09,965 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/f7c559e690d24224ad4a9744c2a3112b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/f7c559e690d24224ad4a9744c2a3112b 2024-11-17T01:27:09,966 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/784265df201c45e8addc1c4d625da8fd to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/784265df201c45e8addc1c4d625da8fd 2024-11-17T01:27:09,967 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/38739dd541ac4a02a3aaf750d255f226 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/38739dd541ac4a02a3aaf750d255f226 2024-11-17T01:27:09,968 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9931f555f2f74de8ab25f5e79146e0d8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9931f555f2f74de8ab25f5e79146e0d8 2024-11-17T01:27:09,969 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e88b8d77a452434985a5a1d0b1a5480d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e88b8d77a452434985a5a1d0b1a5480d 2024-11-17T01:27:09,970 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/902ecf25df7e4c20a81c081264877d5c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/902ecf25df7e4c20a81c081264877d5c 2024-11-17T01:27:09,971 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/1e6b15d13cc84f21a708de601506125c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/1e6b15d13cc84f21a708de601506125c 2024-11-17T01:27:09,972 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4da2e44e940d4a818cc206e3ac65320b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4da2e44e940d4a818cc206e3ac65320b 2024-11-17T01:27:09,973 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/693b82f9040c40e08b25d23bde122c08 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/693b82f9040c40e08b25d23bde122c08 2024-11-17T01:27:09,974 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/962c132baf7f493993dbb3d6fd27e63b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/962c132baf7f493993dbb3d6fd27e63b 2024-11-17T01:27:09,976 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e296e700584344b3bf488448a7d26a7e to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/e296e700584344b3bf488448a7d26a7e 2024-11-17T01:27:09,977 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/469e877155704edc8deca95045652d9c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/469e877155704edc8deca95045652d9c 2024-11-17T01:27:09,978 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/44f07afcc6ac4ad5ab2932879a81310b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/44f07afcc6ac4ad5ab2932879a81310b 2024-11-17T01:27:09,979 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/5291758a84c041bca27b64f421b28b61 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/5291758a84c041bca27b64f421b28b61 2024-11-17T01:27:09,980 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/43a9564e989848079f37c75b3e2cd502 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/43a9564e989848079f37c75b3e2cd502 2024-11-17T01:27:09,982 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4845cf6828934250ae5120971a1d989e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/4845cf6828934250ae5120971a1d989e 2024-11-17T01:27:09,983 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c01f3f584e484d01925234c4f4a91176 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/c01f3f584e484d01925234c4f4a91176 2024-11-17T01:27:09,984 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/85456b6bc35649d59038380649dc35f7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/85456b6bc35649d59038380649dc35f7 2024-11-17T01:27:09,985 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/0cd8d0b583fa415aad64b4c8fbc17b67 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/0cd8d0b583fa415aad64b4c8fbc17b67 2024-11-17T01:27:09,986 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a3db68f8bc5e4e56903291654963342a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/a3db68f8bc5e4e56903291654963342a 2024-11-17T01:27:09,986 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9f691e1c3d7f4771bf92bd3545beaff4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/9f691e1c3d7f4771bf92bd3545beaff4 2024-11-17T01:27:09,988 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/ffb623cc5cd84779a672ec98ce9d9bad, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2bc3879a31574ba787bd9962a4b955a5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/0fcffae1390646ddadd60cd562f96c84, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4952551225c54902b508f5562d02e019, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f86ce407dd1e4b479c568e536d25648a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a7a6bb67af65481ab82f028e39c4a114, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/99ba2b644a6749259aebfbac9d79e145, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7eeeb38f29f24e35b8aadc61aacbbeff, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/cd55ccae16bf4a12b8fd4fd8ddd18591, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e3499440d2b47028aa189c3ce1bd6d5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/bf1244a792024784892c7f9b3392591b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/961c322e72f645f2b510d76de0ada9c4, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e7a2decfae1429ea1c56b4a6cce5a76, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f20b44a2b721400a8da19519329351b4, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d727a4c2d66c445c9123ed9893e54e48, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fc14c90b5d67463888d6cf5b40fc263e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d928c411332b43489c56e82efbc3bab0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3af32d9280eb4af3adf8a4bef48cbebf, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/eb77af869e1b43fb8e2cf719662c26b6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a422b453de854eb7856a2c34e203c199, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/b61e4f4dd38c4f2f89fa299b2ce87c26, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a5804dc3bd7241fc86da36c591dfc081, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/6f91d56450ac43fd8b8394ff45a54ac8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/95159b875dd844e8887d41c2d773774b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1bd772ff45964fb8b9ee3ecb9eea55c9, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2271384f3fbb417e82ec98e6d8291ac0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/155ce6ca7ab44f438cefbaa5428bae86, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e5f8cae544cf4ac884118874b069e28f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3854a5837a754d9cbc36a1cad8b0413b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/5da5d75de38046e1b9c4e02d90484067, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9bc198ae23944cbf8bd2a84206027e41, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a18bbfbb048e42dc97d3e358f23863c0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9de495aac7eb491f84de7071b15db693, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/49b785e4f9774c59ae04aefee86fd1a7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/767fd4487c0345039da04174e00ccb72, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d875c9f1ef0044e89baa860f6d684baa, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1058097916c84a3dbd1cf1c66d6aa094, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/074fdee9562040e0984e0b88bb0fd828, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7b76d3fd523b4a57ac0b0af405313637, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e79513d0ad05487c8803b3dd043f549c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/75db64e98b0049249808a0b9e77bfe2d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fbc38c9c493944e78e9d55ab9654da7c] to archive 2024-11-17T01:27:09,989 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:27:09,990 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/ffb623cc5cd84779a672ec98ce9d9bad to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/ffb623cc5cd84779a672ec98ce9d9bad 2024-11-17T01:27:09,991 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2bc3879a31574ba787bd9962a4b955a5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2bc3879a31574ba787bd9962a4b955a5 2024-11-17T01:27:09,992 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/0fcffae1390646ddadd60cd562f96c84 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/0fcffae1390646ddadd60cd562f96c84 2024-11-17T01:27:09,993 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4952551225c54902b508f5562d02e019 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4952551225c54902b508f5562d02e019 2024-11-17T01:27:09,994 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f86ce407dd1e4b479c568e536d25648a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f86ce407dd1e4b479c568e536d25648a 2024-11-17T01:27:09,995 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a7a6bb67af65481ab82f028e39c4a114 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a7a6bb67af65481ab82f028e39c4a114 2024-11-17T01:27:09,996 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/99ba2b644a6749259aebfbac9d79e145 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/99ba2b644a6749259aebfbac9d79e145 2024-11-17T01:27:09,998 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7eeeb38f29f24e35b8aadc61aacbbeff to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7eeeb38f29f24e35b8aadc61aacbbeff 2024-11-17T01:27:09,999 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/cd55ccae16bf4a12b8fd4fd8ddd18591 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/cd55ccae16bf4a12b8fd4fd8ddd18591 2024-11-17T01:27:10,000 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e3499440d2b47028aa189c3ce1bd6d5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e3499440d2b47028aa189c3ce1bd6d5 2024-11-17T01:27:10,002 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/bf1244a792024784892c7f9b3392591b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/bf1244a792024784892c7f9b3392591b 2024-11-17T01:27:10,003 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/961c322e72f645f2b510d76de0ada9c4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/961c322e72f645f2b510d76de0ada9c4 2024-11-17T01:27:10,004 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e7a2decfae1429ea1c56b4a6cce5a76 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4e7a2decfae1429ea1c56b4a6cce5a76 2024-11-17T01:27:10,005 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f20b44a2b721400a8da19519329351b4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/f20b44a2b721400a8da19519329351b4 2024-11-17T01:27:10,006 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d727a4c2d66c445c9123ed9893e54e48 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d727a4c2d66c445c9123ed9893e54e48 2024-11-17T01:27:10,008 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fc14c90b5d67463888d6cf5b40fc263e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fc14c90b5d67463888d6cf5b40fc263e 2024-11-17T01:27:10,009 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d928c411332b43489c56e82efbc3bab0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d928c411332b43489c56e82efbc3bab0 2024-11-17T01:27:10,010 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3af32d9280eb4af3adf8a4bef48cbebf to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3af32d9280eb4af3adf8a4bef48cbebf 2024-11-17T01:27:10,011 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/eb77af869e1b43fb8e2cf719662c26b6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/eb77af869e1b43fb8e2cf719662c26b6 2024-11-17T01:27:10,012 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a422b453de854eb7856a2c34e203c199 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a422b453de854eb7856a2c34e203c199 2024-11-17T01:27:10,013 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/b61e4f4dd38c4f2f89fa299b2ce87c26 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/b61e4f4dd38c4f2f89fa299b2ce87c26 2024-11-17T01:27:10,014 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a5804dc3bd7241fc86da36c591dfc081 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a5804dc3bd7241fc86da36c591dfc081 2024-11-17T01:27:10,016 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/6f91d56450ac43fd8b8394ff45a54ac8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/6f91d56450ac43fd8b8394ff45a54ac8 2024-11-17T01:27:10,017 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/95159b875dd844e8887d41c2d773774b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/95159b875dd844e8887d41c2d773774b 2024-11-17T01:27:10,018 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1bd772ff45964fb8b9ee3ecb9eea55c9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1bd772ff45964fb8b9ee3ecb9eea55c9 2024-11-17T01:27:10,019 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2271384f3fbb417e82ec98e6d8291ac0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/2271384f3fbb417e82ec98e6d8291ac0 2024-11-17T01:27:10,020 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/155ce6ca7ab44f438cefbaa5428bae86 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/155ce6ca7ab44f438cefbaa5428bae86 2024-11-17T01:27:10,021 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e5f8cae544cf4ac884118874b069e28f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e5f8cae544cf4ac884118874b069e28f 2024-11-17T01:27:10,022 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3854a5837a754d9cbc36a1cad8b0413b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/3854a5837a754d9cbc36a1cad8b0413b 2024-11-17T01:27:10,023 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/5da5d75de38046e1b9c4e02d90484067 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/5da5d75de38046e1b9c4e02d90484067 2024-11-17T01:27:10,024 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9bc198ae23944cbf8bd2a84206027e41 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9bc198ae23944cbf8bd2a84206027e41 2024-11-17T01:27:10,025 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a18bbfbb048e42dc97d3e358f23863c0 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/a18bbfbb048e42dc97d3e358f23863c0 2024-11-17T01:27:10,026 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9de495aac7eb491f84de7071b15db693 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/9de495aac7eb491f84de7071b15db693 2024-11-17T01:27:10,027 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/49b785e4f9774c59ae04aefee86fd1a7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/49b785e4f9774c59ae04aefee86fd1a7 2024-11-17T01:27:10,029 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/767fd4487c0345039da04174e00ccb72 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/767fd4487c0345039da04174e00ccb72 2024-11-17T01:27:10,030 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d875c9f1ef0044e89baa860f6d684baa to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/d875c9f1ef0044e89baa860f6d684baa 2024-11-17T01:27:10,031 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1058097916c84a3dbd1cf1c66d6aa094 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/1058097916c84a3dbd1cf1c66d6aa094 2024-11-17T01:27:10,032 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/074fdee9562040e0984e0b88bb0fd828 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/074fdee9562040e0984e0b88bb0fd828 2024-11-17T01:27:10,033 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7b76d3fd523b4a57ac0b0af405313637 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/7b76d3fd523b4a57ac0b0af405313637 2024-11-17T01:27:10,034 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e79513d0ad05487c8803b3dd043f549c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/e79513d0ad05487c8803b3dd043f549c 2024-11-17T01:27:10,035 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/75db64e98b0049249808a0b9e77bfe2d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/75db64e98b0049249808a0b9e77bfe2d 2024-11-17T01:27:10,037 DEBUG [StoreCloser-TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fbc38c9c493944e78e9d55ab9654da7c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/fbc38c9c493944e78e9d55ab9654da7c 2024-11-17T01:27:10,041 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/recovered.edits/589.seqid, newMaxSeqId=589, maxSeqId=1 2024-11-17T01:27:10,044 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7. 
2024-11-17T01:27:10,044 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for 733401ed1ccb71c159e3f227c30cedc7: 2024-11-17T01:27:10,046 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed 733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:27:10,046 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=733401ed1ccb71c159e3f227c30cedc7, regionState=CLOSED 2024-11-17T01:27:10,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-17T01:27:10,049 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure 733401ed1ccb71c159e3f227c30cedc7, server=04f7e7347dc7,37721,1731806791503 in 1.6220 sec 2024-11-17T01:27:10,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-11-17T01:27:10,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=733401ed1ccb71c159e3f227c30cedc7, UNASSIGN in 1.6270 sec 2024-11-17T01:27:10,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-17T01:27:10,053 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6350 sec 2024-11-17T01:27:10,054 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806830054"}]},"ts":"1731806830054"} 2024-11-17T01:27:10,055 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-17T01:27:10,093 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-17T01:27:10,096 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6970 sec 2024-11-17T01:27:10,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-17T01:27:10,512 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-17T01:27:10,521 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-17T01:27:10,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:10,525 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:10,526 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:10,527 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-17T01:27:10,530 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:27:10,534 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/recovered.edits] 2024-11-17T01:27:10,538 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/19cc63c5db2541d2af3cdc556235dd1a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/19cc63c5db2541d2af3cdc556235dd1a 2024-11-17T01:27:10,540 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a2f864b87a874862a8da27a97d0a8b52 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/A/a2f864b87a874862a8da27a97d0a8b52 2024-11-17T01:27:10,542 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/71cfc386a6c3444ab362eb1cf15a2cc2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/71cfc386a6c3444ab362eb1cf15a2cc2 2024-11-17T01:27:10,544 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/98842a38c5c44e96bc9afe8734cfe4a8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/B/98842a38c5c44e96bc9afe8734cfe4a8 2024-11-17T01:27:10,546 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4d4151e66f5042eeab9491e95637b3f5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/4d4151e66f5042eeab9491e95637b3f5 
2024-11-17T01:27:10,548 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/81e23eb45d1a4d6fb244109cada60a9c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/C/81e23eb45d1a4d6fb244109cada60a9c 2024-11-17T01:27:10,551 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/recovered.edits/589.seqid to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7/recovered.edits/589.seqid 2024-11-17T01:27:10,552 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/733401ed1ccb71c159e3f227c30cedc7 2024-11-17T01:27:10,552 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-17T01:27:10,558 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:10,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-17T01:27:10,567 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-17T01:27:10,600 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-17T01:27:10,601 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:10,601 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-17T01:27:10,602 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731806830601"}]},"ts":"9223372036854775807"} 2024-11-17T01:27:10,605 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-17T01:27:10,605 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 733401ed1ccb71c159e3f227c30cedc7, NAME => 'TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7.', STARTKEY => '', ENDKEY => ''}] 2024-11-17T01:27:10,606 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-17T01:27:10,606 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731806830606"}]},"ts":"9223372036854775807"} 2024-11-17T01:27:10,609 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-17T01:27:10,619 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:10,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 97 msec 2024-11-17T01:27:10,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-17T01:27:10,628 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-17T01:27:10,639 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=239 (was 219) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_137525728_22 at /127.0.0.1:43154 [Waiting for operation #13] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RS:0;04f7e7347dc7:37721-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? 
-, OpenFileDescriptor=457 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=282 (was 153) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4056 (was 4571) 2024-11-17T01:27:10,648 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=239, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=282, ProcessCount=11, AvailableMemoryMB=4053 2024-11-17T01:27:10,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-17T01:27:10,651 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:27:10,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:10,652 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T01:27:10,653 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:10,653 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-11-17T01:27:10,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-17T01:27:10,654 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T01:27:10,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741971_1147 (size=960) 2024-11-17T01:27:10,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 
{}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-17T01:27:10,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-17T01:27:11,063 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 2024-11-17T01:27:11,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741972_1148 (size=53) 2024-11-17T01:27:11,174 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-17T01:27:11,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-17T01:27:11,475 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:27:11,475 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 271e8f2d6ca47e8f7f50d0cb275120f4, disabling compactions & flushes 2024-11-17T01:27:11,475 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:11,475 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:11,475 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
after waiting 0 ms 2024-11-17T01:27:11,476 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:11,476 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:11,476 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:11,479 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T01:27:11,480 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731806831479"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731806831479"}]},"ts":"1731806831479"} 2024-11-17T01:27:11,482 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-17T01:27:11,484 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T01:27:11,484 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806831484"}]},"ts":"1731806831484"} 2024-11-17T01:27:11,486 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-17T01:27:11,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, ASSIGN}] 2024-11-17T01:27:11,537 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, ASSIGN 2024-11-17T01:27:11,539 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, ASSIGN; state=OFFLINE, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=false 2024-11-17T01:27:11,690 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=271e8f2d6ca47e8f7f50d0cb275120f4, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:11,693 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure 271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:27:11,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-17T01:27:11,846 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin 
connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:11,854 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:11,854 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:27:11,855 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:11,855 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:27:11,855 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:11,855 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:11,857 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:11,858 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:27:11,859 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 271e8f2d6ca47e8f7f50d0cb275120f4 columnFamilyName A 2024-11-17T01:27:11,859 DEBUG [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:11,860 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(327): Store=271e8f2d6ca47e8f7f50d0cb275120f4/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 
2024-11-17T01:27:11,860 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:11,861 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:27:11,862 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 271e8f2d6ca47e8f7f50d0cb275120f4 columnFamilyName B 2024-11-17T01:27:11,862 DEBUG [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:11,863 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(327): Store=271e8f2d6ca47e8f7f50d0cb275120f4/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:27:11,863 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:11,865 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:27:11,865 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 271e8f2d6ca47e8f7f50d0cb275120f4 columnFamilyName C 2024-11-17T01:27:11,865 DEBUG [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:11,866 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(327): Store=271e8f2d6ca47e8f7f50d0cb275120f4/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:27:11,866 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:11,867 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:11,867 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:11,869 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T01:27:11,869 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:11,871 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T01:27:11,872 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened 271e8f2d6ca47e8f7f50d0cb275120f4; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64395925, jitterRate=-0.04042594134807587}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:27:11,872 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:11,873 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., pid=37, masterSystemTime=1731806831846 2024-11-17T01:27:11,875 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:11,875 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:11,875 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=271e8f2d6ca47e8f7f50d0cb275120f4, regionState=OPEN, openSeqNum=2, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:11,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-17T01:27:11,877 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure 271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 in 183 msec 2024-11-17T01:27:11,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-17T01:27:11,879 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, ASSIGN in 342 msec 2024-11-17T01:27:11,879 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T01:27:11,879 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806831879"}]},"ts":"1731806831879"} 2024-11-17T01:27:11,880 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-17T01:27:11,890 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T01:27:11,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2390 sec 2024-11-17T01:27:12,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-17T01:27:12,765 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-11-17T01:27:12,769 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4e560c7b to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ddf4c3 2024-11-17T01:27:12,811 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ff872d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:12,815 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:12,818 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52588, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:12,821 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T01:27:12,824 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60568, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T01:27:12,831 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-17T01:27:12,832 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:27:12,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:12,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741973_1149 (size=996) 2024-11-17T01:27:13,254 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-17T01:27:13,254 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-17T01:27:13,261 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-17T01:27:13,271 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, REOPEN/MOVE}] 2024-11-17T01:27:13,272 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, REOPEN/MOVE 2024-11-17T01:27:13,273 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=271e8f2d6ca47e8f7f50d0cb275120f4, regionState=CLOSING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:13,274 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-17T01:27:13,274 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure 271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:27:13,426 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:13,427 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,428 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-17T01:27:13,428 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing 271e8f2d6ca47e8f7f50d0cb275120f4, disabling compactions & flushes 2024-11-17T01:27:13,428 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:13,428 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:13,428 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. after waiting 0 ms 2024-11-17T01:27:13,428 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:13,455 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-17T01:27:13,455 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:13,455 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:13,455 WARN [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: 271e8f2d6ca47e8f7f50d0cb275120f4 to self. 2024-11-17T01:27:13,457 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,457 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=271e8f2d6ca47e8f7f50d0cb275120f4, regionState=CLOSED 2024-11-17T01:27:13,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-17T01:27:13,460 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure 271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 in 184 msec 2024-11-17T01:27:13,461 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, REOPEN/MOVE; state=CLOSED, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=true 2024-11-17T01:27:13,611 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=271e8f2d6ca47e8f7f50d0cb275120f4, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:13,613 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure 271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:27:13,766 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:13,775 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:13,775 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:27:13,776 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,776 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:27:13,777 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,777 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,780 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,781 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:27:13,787 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 271e8f2d6ca47e8f7f50d0cb275120f4 columnFamilyName A 2024-11-17T01:27:13,789 DEBUG [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:13,790 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(327): Store=271e8f2d6ca47e8f7f50d0cb275120f4/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:27:13,790 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,791 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:27:13,791 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 271e8f2d6ca47e8f7f50d0cb275120f4 columnFamilyName B 2024-11-17T01:27:13,791 DEBUG [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:13,791 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(327): Store=271e8f2d6ca47e8f7f50d0cb275120f4/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:27:13,791 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,792 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:27:13,792 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 271e8f2d6ca47e8f7f50d0cb275120f4 columnFamilyName C 2024-11-17T01:27:13,792 DEBUG [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:13,793 INFO [StoreOpener-271e8f2d6ca47e8f7f50d0cb275120f4-1 {}] regionserver.HStore(327): Store=271e8f2d6ca47e8f7f50d0cb275120f4/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:27:13,793 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:13,793 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,795 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,796 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T01:27:13,798 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:13,798 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened 271e8f2d6ca47e8f7f50d0cb275120f4; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64766285, jitterRate=-0.03490714728832245}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:27:13,799 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:13,800 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., pid=42, masterSystemTime=1731806833766 2024-11-17T01:27:13,801 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:13,801 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:13,802 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=271e8f2d6ca47e8f7f50d0cb275120f4, regionState=OPEN, openSeqNum=5, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:13,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40 2024-11-17T01:27:13,804 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure 271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 in 190 msec 2024-11-17T01:27:13,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-17T01:27:13,805 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, REOPEN/MOVE in 533 msec 2024-11-17T01:27:13,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-17T01:27:13,809 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 546 msec 2024-11-17T01:27:13,811 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 976 msec 2024-11-17T01:27:13,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-17T01:27:13,819 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c826820 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29458edd 2024-11-17T01:27:13,877 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cae6c5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:13,881 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2931c73e to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c7d6279 2024-11-17T01:27:13,894 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c820ef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:13,897 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x176c5c1b to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328f994d 2024-11-17T01:27:13,908 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3a4420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:13,911 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x190853fc to 
127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a9306be 2024-11-17T01:27:13,924 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42e904d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:13,928 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46114993 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@769942d9 2024-11-17T01:27:13,942 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:13,945 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x367f47f7 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2885d2d9 2024-11-17T01:27:13,957 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb464a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:13,959 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-11-17T01:27:13,969 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:13,972 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-11-17T01:27:13,983 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:13,985 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-11-17T01:27:13,994 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:13,998 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:13,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-11-17T01:27:14,000 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:14,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-17T01:27:14,000 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:14,000 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:14,001 DEBUG [hconnection-0x2ef1ff2f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:14,001 DEBUG [hconnection-0x538fd024-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:14,002 DEBUG [hconnection-0x5aca9975-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:14,002 DEBUG [hconnection-0x2260baec-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:14,002 DEBUG [hconnection-0x4d89ec67-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:14,003 DEBUG [hconnection-0xc5d6cf1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:14,004 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52592, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:14,004 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52594, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:14,004 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52636, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:14,004 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52628, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:14,005 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52590, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:14,007 DEBUG [hconnection-0x6c83eb43-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-17T01:27:14,007 DEBUG [hconnection-0x45d78326-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:14,008 DEBUG [hconnection-0x68100ae9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:14,008 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52648, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:14,009 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:14,009 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52660, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:14,009 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52664, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:14,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:14,015 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:27:14,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:14,015 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:14,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:14,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:14,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:14,016 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:14,054 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806894052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806894052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806894054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,055 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806894054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806894055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f4e06c7a8acb4a0ea1f646200eedd570_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806834014/Put/seqid=0 2024-11-17T01:27:14,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741974_1150 (size=17034) 2024-11-17T01:27:14,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-17T01:27:14,153 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-17T01:27:14,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
as already flushing 2024-11-17T01:27:14,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806894157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806894158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,164 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806894159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806894160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,165 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806894160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-17T01:27:14,307 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-17T01:27:14,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:14,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806894367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,368 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806894367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806894367, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806894368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806894370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,461 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-17T01:27:14,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:14,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,482 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:14,487 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f4e06c7a8acb4a0ea1f646200eedd570_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f4e06c7a8acb4a0ea1f646200eedd570_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:14,489 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/99733b78145449a6b5040b029f355184, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:14,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/99733b78145449a6b5040b029f355184 is 175, key is test_row_0/A:col10/1731806834014/Put/seqid=0 2024-11-17T01:27:14,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741975_1151 (size=48139) 2024-11-17T01:27:14,510 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/99733b78145449a6b5040b029f355184 2024-11-17T01:27:14,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/12ea5981e5f04bae9549a3ef384fafe4 is 50, key is test_row_0/B:col10/1731806834014/Put/seqid=0 2024-11-17T01:27:14,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741976_1152 (size=12001) 2024-11-17T01:27:14,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-17T01:27:14,614 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,615 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-17T01:27:14,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:14,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,615 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:14,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806894669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,672 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806894671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806894671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806894671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:14,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806894672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-17T01:27:14,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:14,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,769 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:14,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,921 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:14,921 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-17T01:27:14,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:14,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:14,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:14,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:14,965 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/12ea5981e5f04bae9549a3ef384fafe4 2024-11-17T01:27:14,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/72d019905ed541f8b5eb32b987366ac5 is 50, key is test_row_0/C:col10/1731806834014/Put/seqid=0 2024-11-17T01:27:15,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741977_1153 (size=12001) 2024-11-17T01:27:15,005 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/72d019905ed541f8b5eb32b987366ac5 2024-11-17T01:27:15,010 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/99733b78145449a6b5040b029f355184 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/99733b78145449a6b5040b029f355184 2024-11-17T01:27:15,015 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/99733b78145449a6b5040b029f355184, entries=250, sequenceid=16, filesize=47.0 K 2024-11-17T01:27:15,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/12ea5981e5f04bae9549a3ef384fafe4 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/12ea5981e5f04bae9549a3ef384fafe4 2024-11-17T01:27:15,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/12ea5981e5f04bae9549a3ef384fafe4, entries=150, sequenceid=16, filesize=11.7 K 2024-11-17T01:27:15,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/72d019905ed541f8b5eb32b987366ac5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/72d019905ed541f8b5eb32b987366ac5 2024-11-17T01:27:15,031 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/72d019905ed541f8b5eb32b987366ac5, entries=150, sequenceid=16, filesize=11.7 K 2024-11-17T01:27:15,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 1018ms, sequenceid=16, compaction requested=false 2024-11-17T01:27:15,033 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-17T01:27:15,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:15,075 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-17T01:27:15,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:15,076 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-17T01:27:15,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:15,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:15,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:15,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:15,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:15,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:15,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111708eca7f8ade84acf82d9fc946b8f2050_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806834053/Put/seqid=0 2024-11-17T01:27:15,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=43 2024-11-17T01:27:15,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741978_1154 (size=12154) 2024-11-17T01:27:15,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:15,135 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111708eca7f8ade84acf82d9fc946b8f2050_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111708eca7f8ade84acf82d9fc946b8f2050_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:15,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1fa5733a71bb4b72a22e1a3b36674de6, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:15,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1fa5733a71bb4b72a22e1a3b36674de6 is 175, key is test_row_0/A:col10/1731806834053/Put/seqid=0 2024-11-17T01:27:15,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741979_1155 (size=30955) 2024-11-17T01:27:15,154 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1fa5733a71bb4b72a22e1a3b36674de6 2024-11-17T01:27:15,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/748c3126945e4bb593d5953a56fe48dc is 50, key is test_row_0/B:col10/1731806834053/Put/seqid=0 2024-11-17T01:27:15,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:15,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:15,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806895183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,188 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806895184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741980_1156 (size=12001) 2024-11-17T01:27:15,192 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/748c3126945e4bb593d5953a56fe48dc 2024-11-17T01:27:15,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806895188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,193 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806895188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806895189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/c9725dfa1c2d4057b7d0a2e474167fde is 50, key is test_row_0/C:col10/1731806834053/Put/seqid=0 2024-11-17T01:27:15,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741981_1157 (size=12001) 2024-11-17T01:27:15,213 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/c9725dfa1c2d4057b7d0a2e474167fde 2024-11-17T01:27:15,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1fa5733a71bb4b72a22e1a3b36674de6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1fa5733a71bb4b72a22e1a3b36674de6 2024-11-17T01:27:15,226 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1fa5733a71bb4b72a22e1a3b36674de6, entries=150, sequenceid=40, filesize=30.2 K 2024-11-17T01:27:15,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/748c3126945e4bb593d5953a56fe48dc as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/748c3126945e4bb593d5953a56fe48dc 2024-11-17T01:27:15,234 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/748c3126945e4bb593d5953a56fe48dc, entries=150, sequenceid=40, filesize=11.7 K 2024-11-17T01:27:15,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/c9725dfa1c2d4057b7d0a2e474167fde as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/c9725dfa1c2d4057b7d0a2e474167fde 2024-11-17T01:27:15,245 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/c9725dfa1c2d4057b7d0a2e474167fde, entries=150, sequenceid=40, filesize=11.7 K 2024-11-17T01:27:15,246 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 171ms, sequenceid=40, compaction requested=false 2024-11-17T01:27:15,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:15,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:15,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-11-17T01:27:15,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-11-17T01:27:15,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-17T01:27:15,249 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2470 sec 2024-11-17T01:27:15,251 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 1.2510 sec 2024-11-17T01:27:15,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:15,292 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-17T01:27:15,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:15,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:15,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:15,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:15,294 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:15,295 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:15,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117caca58770bc448788d3ddabd868488ac_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806835178/Put/seqid=0 2024-11-17T01:27:15,322 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806895318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,323 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806895319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,323 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806895319, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806895320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,324 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806895320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741982_1158 (size=17034) 2024-11-17T01:27:15,328 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:15,335 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117caca58770bc448788d3ddabd868488ac_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117caca58770bc448788d3ddabd868488ac_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:15,336 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/eee0e434926b4c45a1c584a63308f79d, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:15,337 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/eee0e434926b4c45a1c584a63308f79d is 175, key is test_row_0/A:col10/1731806835178/Put/seqid=0 2024-11-17T01:27:15,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is 
added to blk_1073741983_1159 (size=48139) 2024-11-17T01:27:15,350 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/eee0e434926b4c45a1c584a63308f79d 2024-11-17T01:27:15,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/7c9b7c25ab7946679675ddb1113b7dcd is 50, key is test_row_0/B:col10/1731806835178/Put/seqid=0 2024-11-17T01:27:15,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741984_1160 (size=12001) 2024-11-17T01:27:15,373 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/7c9b7c25ab7946679675ddb1113b7dcd 2024-11-17T01:27:15,391 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/a4e9c49da4c8441c8e4a6ffbac55dbd1 is 50, key is test_row_0/C:col10/1731806835178/Put/seqid=0 2024-11-17T01:27:15,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741985_1161 (size=12001) 2024-11-17T01:27:15,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806895424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806895425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806895426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806895427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806895432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,599 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-17T01:27:15,636 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806895634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806895634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,637 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806895635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806895635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,638 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806895635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/a4e9c49da4c8441c8e4a6ffbac55dbd1 2024-11-17T01:27:15,814 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/eee0e434926b4c45a1c584a63308f79d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/eee0e434926b4c45a1c584a63308f79d 2024-11-17T01:27:15,819 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/eee0e434926b4c45a1c584a63308f79d, entries=250, sequenceid=55, filesize=47.0 K 2024-11-17T01:27:15,820 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/7c9b7c25ab7946679675ddb1113b7dcd as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7c9b7c25ab7946679675ddb1113b7dcd 2024-11-17T01:27:15,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7c9b7c25ab7946679675ddb1113b7dcd, entries=150, sequenceid=55, filesize=11.7 K 2024-11-17T01:27:15,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/a4e9c49da4c8441c8e4a6ffbac55dbd1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/a4e9c49da4c8441c8e4a6ffbac55dbd1 2024-11-17T01:27:15,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/a4e9c49da4c8441c8e4a6ffbac55dbd1, entries=150, sequenceid=55, filesize=11.7 K 2024-11-17T01:27:15,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 541ms, sequenceid=55, compaction requested=true 2024-11-17T01:27:15,834 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:15,834 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:15,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:15,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:15,835 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:15,836 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 127233 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:15,836 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/A is initiating minor compaction (all files) 2024-11-17T01:27:15,836 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/A in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:15,836 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/99733b78145449a6b5040b029f355184, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1fa5733a71bb4b72a22e1a3b36674de6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/eee0e434926b4c45a1c584a63308f79d] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=124.3 K 2024-11-17T01:27:15,836 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:15,836 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/99733b78145449a6b5040b029f355184, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1fa5733a71bb4b72a22e1a3b36674de6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/eee0e434926b4c45a1c584a63308f79d] 2024-11-17T01:27:15,836 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:15,836 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/B is initiating minor compaction (all files) 2024-11-17T01:27:15,836 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/B in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:15,837 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/12ea5981e5f04bae9549a3ef384fafe4, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/748c3126945e4bb593d5953a56fe48dc, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7c9b7c25ab7946679675ddb1113b7dcd] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=35.2 K 2024-11-17T01:27:15,837 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 99733b78145449a6b5040b029f355184, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1731806834011 2024-11-17T01:27:15,837 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 12ea5981e5f04bae9549a3ef384fafe4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1731806834011 2024-11-17T01:27:15,838 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1fa5733a71bb4b72a22e1a3b36674de6, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731806834030 2024-11-17T01:27:15,838 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 748c3126945e4bb593d5953a56fe48dc, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731806834030 2024-11-17T01:27:15,839 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting eee0e434926b4c45a1c584a63308f79d, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806835178 2024-11-17T01:27:15,839 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 7c9b7c25ab7946679675ddb1113b7dcd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806835178 2024-11-17T01:27:15,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:15,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:15,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:15,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:15,850 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#B#compaction#141 average throughput is 3.28 MB/second, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:15,851 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/5dfaff13bd0f4cd687d7f7d4ddb5dc76 is 50, key is test_row_0/B:col10/1731806835178/Put/seqid=0 2024-11-17T01:27:15,851 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:15,856 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111738c17ba543ff46318361f132c0b2385d_271e8f2d6ca47e8f7f50d0cb275120f4 store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:15,864 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111738c17ba543ff46318361f132c0b2385d_271e8f2d6ca47e8f7f50d0cb275120f4, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:15,864 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111738c17ba543ff46318361f132c0b2385d_271e8f2d6ca47e8f7f50d0cb275120f4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:15,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741986_1162 (size=12104) 2024-11-17T01:27:15,872 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/5dfaff13bd0f4cd687d7f7d4ddb5dc76 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5dfaff13bd0f4cd687d7f7d4ddb5dc76 2024-11-17T01:27:15,903 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/B of 271e8f2d6ca47e8f7f50d0cb275120f4 into 5dfaff13bd0f4cd687d7f7d4ddb5dc76(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:15,903 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:15,903 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/B, priority=13, startTime=1731806835835; duration=0sec 2024-11-17T01:27:15,903 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:15,903 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:B 2024-11-17T01:27:15,903 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:15,905 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:15,905 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/C is initiating minor compaction (all files) 2024-11-17T01:27:15,905 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/C in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:15,905 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/72d019905ed541f8b5eb32b987366ac5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/c9725dfa1c2d4057b7d0a2e474167fde, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/a4e9c49da4c8441c8e4a6ffbac55dbd1] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=35.2 K 2024-11-17T01:27:15,908 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 72d019905ed541f8b5eb32b987366ac5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1731806834011 2024-11-17T01:27:15,908 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c9725dfa1c2d4057b7d0a2e474167fde, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731806834030 2024-11-17T01:27:15,909 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a4e9c49da4c8441c8e4a6ffbac55dbd1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806835178 2024-11-17T01:27:15,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is 
added to blk_1073741987_1163 (size=4469) 2024-11-17T01:27:15,916 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#A#compaction#142 average throughput is 0.38 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:15,919 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/ddbd52825d4d461f95e6e61df73e7235 is 175, key is test_row_0/A:col10/1731806835178/Put/seqid=0 2024-11-17T01:27:15,926 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#C#compaction#143 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:15,926 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/f769181ed1c04568a9bccd320c2b7d1d is 50, key is test_row_0/C:col10/1731806835178/Put/seqid=0 2024-11-17T01:27:15,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741988_1164 (size=31058) 2024-11-17T01:27:15,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-17T01:27:15,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:15,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:15,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:15,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:15,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:15,944 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:15,945 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:15,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741989_1165 (size=12104) 2024-11-17T01:27:15,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111722b8d707850b4c8ca57bdf2650764306_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806835319/Put/seqid=0 2024-11-17T01:27:15,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,959 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/f769181ed1c04568a9bccd320c2b7d1d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f769181ed1c04568a9bccd320c2b7d1d 2024-11-17T01:27:15,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806895956, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,966 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/C of 271e8f2d6ca47e8f7f50d0cb275120f4 into f769181ed1c04568a9bccd320c2b7d1d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:15,966 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:15,966 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/C, priority=13, startTime=1731806835842; duration=0sec 2024-11-17T01:27:15,966 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:15,966 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:C 2024-11-17T01:27:15,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806895957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806895959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,967 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806895960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:15,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806895963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:15,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741990_1166 (size=12154) 2024-11-17T01:27:15,977 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:15,983 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111722b8d707850b4c8ca57bdf2650764306_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111722b8d707850b4c8ca57bdf2650764306_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:15,986 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/ff00ee24257640eba77cc7d577a77871, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:15,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/ff00ee24257640eba77cc7d577a77871 is 175, key is test_row_0/A:col10/1731806835319/Put/seqid=0 2024-11-17T01:27:16,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741991_1167 (size=30955) 2024-11-17T01:27:16,002 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=44.7 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/ff00ee24257640eba77cc7d577a77871 2024-11-17T01:27:16,014 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/86d15b44e3714f17a120ccd1dd82fc7e is 50, key is test_row_0/B:col10/1731806835319/Put/seqid=0 2024-11-17T01:27:16,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741992_1168 (size=12001) 2024-11-17T01:27:16,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806896061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806896069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806896069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806896069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806896069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-17T01:27:16,105 INFO [Thread-732 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-11-17T01:27:16,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:16,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-17T01:27:16,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-17T01:27:16,108 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:16,109 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:16,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:16,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-17T01:27:16,260 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-17T01:27:16,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:16,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:16,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:16,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:16,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
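The pid=45/pid=46 entries are a client-requested table flush: the master stores a FlushTableProcedure, dispatches a FlushRegionProcedure to the region server, and the FlushRegionCallable fails with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still writing out the previous flush; as the later attempts in this log show, the master simply re-dispatches the subprocedure until it succeeds. Below is a minimal sketch of issuing such a flush from the client side, assuming the HBase 2.x Admin API (connection details and table name are illustrative).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits the flush to the master (the FlushTableProcedure above) and waits
      // for it to finish; per-region subprocedures that hit "already flushing" are
      // retried by the master rather than failing the whole request.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}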
2024-11-17T01:27:16,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:16,267 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806896266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806896274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806896274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806896274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806896275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,348 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/ddbd52825d4d461f95e6e61df73e7235 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ddbd52825d4d461f95e6e61df73e7235 2024-11-17T01:27:16,354 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/A of 271e8f2d6ca47e8f7f50d0cb275120f4 into ddbd52825d4d461f95e6e61df73e7235(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
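From the client's point of view, each rejected Mutate call above surfaces as a RegionTooBusyException, which the HBase client normally retries internally. The sketch below shows what an explicit retry-with-backoff loop could look like if the exception reaches application code; it is a hedged illustration assuming the standard 2.x client API, with row, family, qualifier, and value taken from the test only as placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryBusyRegionPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      long backoffMs = 100;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put); // rejected while the memstore is over its blocking limit
          return;         // write accepted
        } catch (RegionTooBusyException e) {
          // The region is still flushing/compacting; back off and try again.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000L);
        }
      }
      throw new IOException("region stayed too busy after 10 attempts");
    }
  }
}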
2024-11-17T01:27:16,354 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:16,354 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/A, priority=13, startTime=1731806835834; duration=0sec 2024-11-17T01:27:16,354 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:16,354 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:A 2024-11-17T01:27:16,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-17T01:27:16,414 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,418 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-17T01:27:16,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:16,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:16,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:16,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
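The shortCompactions/longCompactions entries record system-selected compactions of stores A and C, each rewriting three store files into one. Compactions can also be requested explicitly; the sketch below is a hedged example assuming the 2.x Admin API, with the polling loop and table/family names used purely for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to compact store "A" of every region of the table.
      // The request is asynchronous: it is queued on CompactSplit, just like the
      // system-selected compactions in the log above.
      admin.compact(table, Bytes.toBytes("A"));

      // Poll until the servers report no compaction in progress for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
      System.out.println("compaction finished for " + table);
    }
  }
}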
2024-11-17T01:27:16,419 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:16,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:16,424 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/86d15b44e3714f17a120ccd1dd82fc7e 2024-11-17T01:27:16,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/7069ea3642244a4399b940b2ec422540 is 50, key is test_row_0/C:col10/1731806835319/Put/seqid=0 2024-11-17T01:27:16,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741993_1169 (size=12001) 2024-11-17T01:27:16,445 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/7069ea3642244a4399b940b2ec422540 2024-11-17T01:27:16,452 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/ff00ee24257640eba77cc7d577a77871 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ff00ee24257640eba77cc7d577a77871 2024-11-17T01:27:16,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ff00ee24257640eba77cc7d577a77871, entries=150, sequenceid=79, filesize=30.2 K 2024-11-17T01:27:16,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/86d15b44e3714f17a120ccd1dd82fc7e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/86d15b44e3714f17a120ccd1dd82fc7e 2024-11-17T01:27:16,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/86d15b44e3714f17a120ccd1dd82fc7e, entries=150, sequenceid=79, filesize=11.7 K 2024-11-17T01:27:16,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/7069ea3642244a4399b940b2ec422540 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/7069ea3642244a4399b940b2ec422540 
2024-11-17T01:27:16,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/7069ea3642244a4399b940b2ec422540, entries=150, sequenceid=79, filesize=11.7 K 2024-11-17T01:27:16,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 538ms, sequenceid=79, compaction requested=false 2024-11-17T01:27:16,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:16,571 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-17T01:27:16,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:16,572 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-17T01:27:16,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:16,573 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
as already flushing 2024-11-17T01:27:16,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:16,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:16,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:16,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:16,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:16,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:16,583 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117deed8a201479472292b34c982fbee194_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806836571/Put/seqid=0 2024-11-17T01:27:16,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806896600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806896600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806896601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806896604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806896606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741994_1170 (size=14594) 2024-11-17T01:27:16,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:16,623 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117deed8a201479472292b34c982fbee194_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117deed8a201479472292b34c982fbee194_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:16,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1a3e15c7a408475d888ae23cc7761d22, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:16,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1a3e15c7a408475d888ae23cc7761d22 is 175, key is test_row_0/A:col10/1731806836571/Put/seqid=0 2024-11-17T01:27:16,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741995_1171 (size=39549) 2024-11-17T01:27:16,630 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1a3e15c7a408475d888ae23cc7761d22 2024-11-17T01:27:16,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/6a3b0ad7b481497e985f6d4a5a7f85b2 is 50, key is test_row_0/B:col10/1731806836571/Put/seqid=0 2024-11-17T01:27:16,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741996_1172 (size=12001) 2024-11-17T01:27:16,644 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/6a3b0ad7b481497e985f6d4a5a7f85b2 2024-11-17T01:27:16,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/14226bbe95384a0992dba298214b6d67 is 50, key is test_row_0/C:col10/1731806836571/Put/seqid=0 2024-11-17T01:27:16,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741997_1173 (size=12001) 2024-11-17T01:27:16,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-17T01:27:16,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806896709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806896710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806896711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806896713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806896713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,913 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806896913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,914 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806896913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,917 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806896915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806896915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:16,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:16,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806896915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,066 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/14226bbe95384a0992dba298214b6d67 2024-11-17T01:27:17,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1a3e15c7a408475d888ae23cc7761d22 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1a3e15c7a408475d888ae23cc7761d22 2024-11-17T01:27:17,075 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1a3e15c7a408475d888ae23cc7761d22, entries=200, sequenceid=96, filesize=38.6 K 2024-11-17T01:27:17,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/6a3b0ad7b481497e985f6d4a5a7f85b2 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6a3b0ad7b481497e985f6d4a5a7f85b2 2024-11-17T01:27:17,081 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6a3b0ad7b481497e985f6d4a5a7f85b2, entries=150, sequenceid=96, filesize=11.7 K 2024-11-17T01:27:17,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/14226bbe95384a0992dba298214b6d67 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/14226bbe95384a0992dba298214b6d67 2024-11-17T01:27:17,088 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/14226bbe95384a0992dba298214b6d67, entries=150, sequenceid=96, filesize=11.7 K 2024-11-17T01:27:17,090 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 518ms, sequenceid=96, compaction requested=true 2024-11-17T01:27:17,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:17,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:17,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-17T01:27:17,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-17T01:27:17,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-17T01:27:17,094 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 983 msec 2024-11-17T01:27:17,099 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 988 msec 2024-11-17T01:27:17,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-17T01:27:17,212 INFO [Thread-732 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-17T01:27:17,213 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:17,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-17T01:27:17,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-17T01:27:17,215 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:17,215 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:17,216 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:17,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:17,218 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-17T01:27:17,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:17,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:17,218 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:17,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-17T01:27:17,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:17,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:17,228 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117cca251fcd7b04ed0a9af840f8bea4bc3_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806836604/Put/seqid=0 2024-11-17T01:27:17,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741998_1174 (size=14594) 2024-11-17T01:27:17,237 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:17,242 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117cca251fcd7b04ed0a9af840f8bea4bc3_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117cca251fcd7b04ed0a9af840f8bea4bc3_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:17,244 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/6b54b59380d548e3bc7c4e8631f3cfc6, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:17,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/6b54b59380d548e3bc7c4e8631f3cfc6 is 175, key is test_row_0/A:col10/1731806836604/Put/seqid=0 2024-11-17T01:27:17,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741999_1175 (size=39549) 2024-11-17T01:27:17,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,275 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806897234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806897235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806897275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,282 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806897275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806897275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-17T01:27:17,367 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,368 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-17T01:27:17,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:17,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:17,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:17,368 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,369 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,378 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806897377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806897377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806897384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806897384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,386 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806897384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-17T01:27:17,520 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,521 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-17T01:27:17,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:17,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:17,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:17,521 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806897579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806897580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806897586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806897587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,590 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806897587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,657 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=118, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/6b54b59380d548e3bc7c4e8631f3cfc6 2024-11-17T01:27:17,664 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/4be8892833fc4f47bfd85e8318aa3907 is 50, key is test_row_0/B:col10/1731806836604/Put/seqid=0 2024-11-17T01:27:17,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742000_1176 (size=12001) 2024-11-17T01:27:17,673 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-17T01:27:17,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:17,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:17,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:17,674 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,816 INFO [master/04f7e7347dc7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-17T01:27:17,816 INFO [master/04f7e7347dc7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-17T01:27:17,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-17T01:27:17,829 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-17T01:27:17,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:17,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:17,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:17,830 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,887 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806897884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806897885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806897893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,893 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806897893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:17,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806897893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,981 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:17,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-17T01:27:17,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:17,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:17,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:17,982 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:17,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/4be8892833fc4f47bfd85e8318aa3907 2024-11-17T01:27:18,077 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/18eeca80d08a45b4a7b320c17f510410 is 50, key is test_row_0/C:col10/1731806836604/Put/seqid=0 2024-11-17T01:27:18,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742001_1177 (size=12001) 2024-11-17T01:27:18,134 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:18,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-17T01:27:18,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
as already flushing 2024-11-17T01:27:18,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,287 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:18,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-17T01:27:18,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:18,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,288 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-17T01:27:18,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:18,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806898390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:18,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:18,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806898391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:18,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:18,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806898395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:18,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:18,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806898396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:18,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:18,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806898398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:18,440 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:18,441 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-17T01:27:18,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:18,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,441 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] handler.RSProcedureHandler(58): pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,441 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=48 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=48 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:18,484 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=118 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/18eeca80d08a45b4a7b320c17f510410 2024-11-17T01:27:18,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/6b54b59380d548e3bc7c4e8631f3cfc6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/6b54b59380d548e3bc7c4e8631f3cfc6 2024-11-17T01:27:18,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/6b54b59380d548e3bc7c4e8631f3cfc6, entries=200, sequenceid=118, filesize=38.6 K 2024-11-17T01:27:18,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/4be8892833fc4f47bfd85e8318aa3907 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/4be8892833fc4f47bfd85e8318aa3907 2024-11-17T01:27:18,498 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/4be8892833fc4f47bfd85e8318aa3907, entries=150, 
sequenceid=118, filesize=11.7 K 2024-11-17T01:27:18,498 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/18eeca80d08a45b4a7b320c17f510410 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/18eeca80d08a45b4a7b320c17f510410 2024-11-17T01:27:18,503 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/18eeca80d08a45b4a7b320c17f510410, entries=150, sequenceid=118, filesize=11.7 K 2024-11-17T01:27:18,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 1285ms, sequenceid=118, compaction requested=true 2024-11-17T01:27:18,504 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:18,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:18,504 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:18,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:18,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:18,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:18,504 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:18,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:18,504 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:18,505 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141111 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:18,506 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/A is initiating minor compaction (all files) 2024-11-17T01:27:18,506 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/A in 
TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,506 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ddbd52825d4d461f95e6e61df73e7235, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ff00ee24257640eba77cc7d577a77871, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1a3e15c7a408475d888ae23cc7761d22, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/6b54b59380d548e3bc7c4e8631f3cfc6] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=137.8 K 2024-11-17T01:27:18,506 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,506 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ddbd52825d4d461f95e6e61df73e7235, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ff00ee24257640eba77cc7d577a77871, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1a3e15c7a408475d888ae23cc7761d22, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/6b54b59380d548e3bc7c4e8631f3cfc6] 2024-11-17T01:27:18,506 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:18,506 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddbd52825d4d461f95e6e61df73e7235, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806835178 2024-11-17T01:27:18,506 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/B is initiating minor compaction (all files) 2024-11-17T01:27:18,507 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/B in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:18,507 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5dfaff13bd0f4cd687d7f7d4ddb5dc76, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/86d15b44e3714f17a120ccd1dd82fc7e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6a3b0ad7b481497e985f6d4a5a7f85b2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/4be8892833fc4f47bfd85e8318aa3907] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=47.0 K 2024-11-17T01:27:18,507 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 5dfaff13bd0f4cd687d7f7d4ddb5dc76, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806835178 2024-11-17T01:27:18,507 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff00ee24257640eba77cc7d577a77871, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731806835317 2024-11-17T01:27:18,508 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1a3e15c7a408475d888ae23cc7761d22, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731806835955 2024-11-17T01:27:18,508 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 86d15b44e3714f17a120ccd1dd82fc7e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731806835317 2024-11-17T01:27:18,508 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 6a3b0ad7b481497e985f6d4a5a7f85b2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731806835955 2024-11-17T01:27:18,508 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6b54b59380d548e3bc7c4e8631f3cfc6, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731806836598 2024-11-17T01:27:18,509 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4be8892833fc4f47bfd85e8318aa3907, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731806836598 2024-11-17T01:27:18,518 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#B#compaction#153 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:18,519 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/69c9effb15cd48dd9ce39599cde8f291 is 50, key is test_row_0/B:col10/1731806836604/Put/seqid=0 2024-11-17T01:27:18,523 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:18,531 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411170acc70c7983e4f29b8f353cf951c1f78_271e8f2d6ca47e8f7f50d0cb275120f4 store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:18,536 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411170acc70c7983e4f29b8f353cf951c1f78_271e8f2d6ca47e8f7f50d0cb275120f4, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:18,536 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411170acc70c7983e4f29b8f353cf951c1f78_271e8f2d6ca47e8f7f50d0cb275120f4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:18,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742002_1178 (size=12241) 2024-11-17T01:27:18,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742003_1179 (size=4469) 2024-11-17T01:27:18,551 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#A#compaction#154 average throughput is 0.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:18,552 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/241c2a2eb51044268764e37954504cc2 is 175, key is test_row_0/A:col10/1731806836604/Put/seqid=0 2024-11-17T01:27:18,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742004_1180 (size=31195) 2024-11-17T01:27:18,593 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:18,593 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-17T01:27:18,593 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,595 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-17T01:27:18,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:18,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:18,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:18,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:18,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:18,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:18,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111725ae920c2a1a469e950cf80cc3453ffd_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806837237/Put/seqid=0 2024-11-17T01:27:18,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742005_1181 (size=12254) 2024-11-17T01:27:18,978 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/241c2a2eb51044268764e37954504cc2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/241c2a2eb51044268764e37954504cc2 2024-11-17T01:27:18,978 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/69c9effb15cd48dd9ce39599cde8f291 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/69c9effb15cd48dd9ce39599cde8f291 2024-11-17T01:27:18,983 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/B of 271e8f2d6ca47e8f7f50d0cb275120f4 into 69c9effb15cd48dd9ce39599cde8f291(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:18,983 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/A of 271e8f2d6ca47e8f7f50d0cb275120f4 into 241c2a2eb51044268764e37954504cc2(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:18,983 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:18,983 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:18,983 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/B, priority=12, startTime=1731806838504; duration=0sec 2024-11-17T01:27:18,983 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/A, priority=12, startTime=1731806838504; duration=0sec 2024-11-17T01:27:18,984 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:18,984 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:A 2024-11-17T01:27:18,984 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:18,984 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:B 2024-11-17T01:27:18,984 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 
compacting, 4 eligible, 16 blocking 2024-11-17T01:27:18,985 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:18,985 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/C is initiating minor compaction (all files) 2024-11-17T01:27:18,985 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/C in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:18,985 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f769181ed1c04568a9bccd320c2b7d1d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/7069ea3642244a4399b940b2ec422540, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/14226bbe95384a0992dba298214b6d67, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/18eeca80d08a45b4a7b320c17f510410] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=47.0 K 2024-11-17T01:27:18,986 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting f769181ed1c04568a9bccd320c2b7d1d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806835178 2024-11-17T01:27:18,986 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7069ea3642244a4399b940b2ec422540, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731806835317 2024-11-17T01:27:18,986 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 14226bbe95384a0992dba298214b6d67, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731806835955 2024-11-17T01:27:18,987 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18eeca80d08a45b4a7b320c17f510410, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731806836598 2024-11-17T01:27:19,002 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#C#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:19,003 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/ffe3facf182441b796eb3149d83e9f30 is 50, key is test_row_0/C:col10/1731806836604/Put/seqid=0 2024-11-17T01:27:19,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:19,015 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111725ae920c2a1a469e950cf80cc3453ffd_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111725ae920c2a1a469e950cf80cc3453ffd_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:19,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742006_1182 (size=12241) 2024-11-17T01:27:19,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/7ab26ed32f2045d18f95e44033eaf426, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:19,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/7ab26ed32f2045d18f95e44033eaf426 is 175, key is test_row_0/A:col10/1731806837237/Put/seqid=0 2024-11-17T01:27:19,022 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/ffe3facf182441b796eb3149d83e9f30 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffe3facf182441b796eb3149d83e9f30 2024-11-17T01:27:19,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742007_1183 (size=31055) 2024-11-17T01:27:19,024 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/7ab26ed32f2045d18f95e44033eaf426 2024-11-17T01:27:19,029 INFO 
[RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/C of 271e8f2d6ca47e8f7f50d0cb275120f4 into ffe3facf182441b796eb3149d83e9f30(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:19,029 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:19,029 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/C, priority=12, startTime=1731806838504; duration=0sec 2024-11-17T01:27:19,029 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:19,029 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:C 2024-11-17T01:27:19,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/7b025d48498147c6811556e90615de21 is 50, key is test_row_0/B:col10/1731806837237/Put/seqid=0 2024-11-17T01:27:19,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742008_1184 (size=12101) 2024-11-17T01:27:19,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-17T01:27:19,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:19,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:19,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806899414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806899415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806899416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806899416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,423 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806899420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,452 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/7b025d48498147c6811556e90615de21 2024-11-17T01:27:19,459 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/37f53171996e45019f8fa5173fc6f9aa is 50, key is test_row_0/C:col10/1731806837237/Put/seqid=0 2024-11-17T01:27:19,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742009_1185 (size=12101) 2024-11-17T01:27:19,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806899519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806899519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806899520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806899524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,526 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806899524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806899722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806899723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,726 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806899724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806899727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:19,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806899728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:19,885 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/37f53171996e45019f8fa5173fc6f9aa 2024-11-17T01:27:19,889 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/7ab26ed32f2045d18f95e44033eaf426 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7ab26ed32f2045d18f95e44033eaf426 2024-11-17T01:27:19,893 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7ab26ed32f2045d18f95e44033eaf426, entries=150, sequenceid=132, filesize=30.3 K 2024-11-17T01:27:19,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/7b025d48498147c6811556e90615de21 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7b025d48498147c6811556e90615de21 2024-11-17T01:27:19,899 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7b025d48498147c6811556e90615de21, entries=150, sequenceid=132, filesize=11.8 K 2024-11-17T01:27:19,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/37f53171996e45019f8fa5173fc6f9aa as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/37f53171996e45019f8fa5173fc6f9aa 2024-11-17T01:27:19,904 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/37f53171996e45019f8fa5173fc6f9aa, entries=150, sequenceid=132, filesize=11.8 K 2024-11-17T01:27:19,905 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 1310ms, sequenceid=132, compaction requested=false 2024-11-17T01:27:19,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:19,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:19,905 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-17T01:27:19,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-17T01:27:19,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-17T01:27:19,908 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6910 sec 2024-11-17T01:27:19,909 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 2.6950 sec 2024-11-17T01:27:20,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:20,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-17T01:27:20,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:20,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:20,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:20,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:20,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:20,028 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:20,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806900035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806900036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806900037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411175bef286249174c68aa25c1df4a6a8f35_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806840027/Put/seqid=0 2024-11-17T01:27:20,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806900038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806900039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742010_1186 (size=12304) 2024-11-17T01:27:20,142 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806900140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806900141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806900141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806900143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806900143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,345 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806900344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806900346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,347 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806900346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806900346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806900346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,461 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:20,465 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411175bef286249174c68aa25c1df4a6a8f35_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411175bef286249174c68aa25c1df4a6a8f35_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:20,466 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/0c954cf0808b454c9a05bfb578dce361, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:20,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/0c954cf0808b454c9a05bfb578dce361 is 175, key is test_row_0/A:col10/1731806840027/Put/seqid=0 2024-11-17T01:27:20,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742011_1187 (size=31105) 2024-11-17T01:27:20,471 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/0c954cf0808b454c9a05bfb578dce361 2024-11-17T01:27:20,491 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/66191ba920314a018e1313e26cc9cff4 is 50, key is test_row_0/B:col10/1731806840027/Put/seqid=0 2024-11-17T01:27:20,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742012_1188 
(size=12151) 2024-11-17T01:27:20,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806900648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,650 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806900649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806900649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,651 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806900649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:20,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806900651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:20,908 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/66191ba920314a018e1313e26cc9cff4 2024-11-17T01:27:20,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/40b9a339eb9e4322915b4d6af509ff60 is 50, key is test_row_0/C:col10/1731806840027/Put/seqid=0 2024-11-17T01:27:20,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742013_1189 (size=12151) 2024-11-17T01:27:20,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/40b9a339eb9e4322915b4d6af509ff60 2024-11-17T01:27:20,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/0c954cf0808b454c9a05bfb578dce361 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/0c954cf0808b454c9a05bfb578dce361 2024-11-17T01:27:20,944 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/0c954cf0808b454c9a05bfb578dce361, entries=150, sequenceid=159, filesize=30.4 K 2024-11-17T01:27:20,945 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/66191ba920314a018e1313e26cc9cff4 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/66191ba920314a018e1313e26cc9cff4 2024-11-17T01:27:20,951 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/66191ba920314a018e1313e26cc9cff4, entries=150, sequenceid=159, filesize=11.9 K 2024-11-17T01:27:20,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/40b9a339eb9e4322915b4d6af509ff60 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/40b9a339eb9e4322915b4d6af509ff60 2024-11-17T01:27:20,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/40b9a339eb9e4322915b4d6af509ff60, entries=150, sequenceid=159, filesize=11.9 K 2024-11-17T01:27:20,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 932ms, sequenceid=159, compaction requested=true 2024-11-17T01:27:20,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:20,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:20,959 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:20,959 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:20,960 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:20,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:20,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:20,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:20,960 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:20,961 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93355 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:20,961 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 
271e8f2d6ca47e8f7f50d0cb275120f4/A is initiating minor compaction (all files) 2024-11-17T01:27:20,961 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/A in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:20,962 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/241c2a2eb51044268764e37954504cc2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7ab26ed32f2045d18f95e44033eaf426, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/0c954cf0808b454c9a05bfb578dce361] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=91.2 K 2024-11-17T01:27:20,962 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:20,962 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/241c2a2eb51044268764e37954504cc2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7ab26ed32f2045d18f95e44033eaf426, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/0c954cf0808b454c9a05bfb578dce361] 2024-11-17T01:27:20,962 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:20,962 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/B is initiating minor compaction (all files) 2024-11-17T01:27:20,962 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/B in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:20,962 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/69c9effb15cd48dd9ce39599cde8f291, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7b025d48498147c6811556e90615de21, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/66191ba920314a018e1313e26cc9cff4] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=35.6 K 2024-11-17T01:27:20,963 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 241c2a2eb51044268764e37954504cc2, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731806836598 2024-11-17T01:27:20,964 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 69c9effb15cd48dd9ce39599cde8f291, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731806836598 2024-11-17T01:27:20,964 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7ab26ed32f2045d18f95e44033eaf426, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731806837233 2024-11-17T01:27:20,964 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b025d48498147c6811556e90615de21, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731806837233 2024-11-17T01:27:20,964 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c954cf0808b454c9a05bfb578dce361, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731806839414 2024-11-17T01:27:20,964 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 66191ba920314a018e1313e26cc9cff4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731806839414 2024-11-17T01:27:20,972 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:20,973 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#B#compaction#162 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:20,974 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/8d9a433bd04041bfbac12e4fdda7606e is 50, key is test_row_0/B:col10/1731806840027/Put/seqid=0 2024-11-17T01:27:20,975 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241117c240d67a18534de9b23b766d2a4538d3_271e8f2d6ca47e8f7f50d0cb275120f4 store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:20,977 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241117c240d67a18534de9b23b766d2a4538d3_271e8f2d6ca47e8f7f50d0cb275120f4, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:20,978 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117c240d67a18534de9b23b766d2a4538d3_271e8f2d6ca47e8f7f50d0cb275120f4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:21,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742014_1190 (size=12493) 2024-11-17T01:27:21,013 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/8d9a433bd04041bfbac12e4fdda7606e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/8d9a433bd04041bfbac12e4fdda7606e 2024-11-17T01:27:21,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742015_1191 (size=4469) 2024-11-17T01:27:21,015 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#A#compaction#163 average throughput is 0.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:21,015 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1c3b39cc81ad463197f0fbad96efb15f is 175, key is test_row_0/A:col10/1731806840027/Put/seqid=0 2024-11-17T01:27:21,027 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/B of 271e8f2d6ca47e8f7f50d0cb275120f4 into 8d9a433bd04041bfbac12e4fdda7606e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:21,027 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:21,027 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/B, priority=13, startTime=1731806840959; duration=0sec 2024-11-17T01:27:21,028 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:21,028 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:B 2024-11-17T01:27:21,028 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:21,029 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36493 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:21,029 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/C is initiating minor compaction (all files) 2024-11-17T01:27:21,029 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/C in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:21,029 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffe3facf182441b796eb3149d83e9f30, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/37f53171996e45019f8fa5173fc6f9aa, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/40b9a339eb9e4322915b4d6af509ff60] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=35.6 K 2024-11-17T01:27:21,030 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ffe3facf182441b796eb3149d83e9f30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=118, earliestPutTs=1731806836598 2024-11-17T01:27:21,030 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 37f53171996e45019f8fa5173fc6f9aa, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731806837233 2024-11-17T01:27:21,030 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 40b9a339eb9e4322915b4d6af509ff60, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731806839414 2024-11-17T01:27:21,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 
is added to blk_1073742016_1192 (size=31447) 2024-11-17T01:27:21,043 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/1c3b39cc81ad463197f0fbad96efb15f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1c3b39cc81ad463197f0fbad96efb15f 2024-11-17T01:27:21,051 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/A of 271e8f2d6ca47e8f7f50d0cb275120f4 into 1c3b39cc81ad463197f0fbad96efb15f(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:21,051 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:21,051 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/A, priority=13, startTime=1731806840959; duration=0sec 2024-11-17T01:27:21,051 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:21,051 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:A 2024-11-17T01:27:21,061 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#C#compaction#164 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:21,062 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/2dad7c71ce23414a88829726661cd87e is 50, key is test_row_0/C:col10/1731806840027/Put/seqid=0 2024-11-17T01:27:21,075 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742017_1193 (size=12493) 2024-11-17T01:27:21,087 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/2dad7c71ce23414a88829726661cd87e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/2dad7c71ce23414a88829726661cd87e 2024-11-17T01:27:21,095 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/C of 271e8f2d6ca47e8f7f50d0cb275120f4 into 2dad7c71ce23414a88829726661cd87e(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:21,095 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:21,095 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/C, priority=13, startTime=1731806840960; duration=0sec 2024-11-17T01:27:21,098 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:21,098 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:C 2024-11-17T01:27:21,154 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-17T01:27:21,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:21,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:21,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:21,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:21,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:21,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:21,156 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:21,167 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117994da934157a4614acf010097cd2de88_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806840036/Put/seqid=0 2024-11-17T01:27:21,173 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-17T01:27:21,173 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-17T01:27:21,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806901175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806901178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806901178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,183 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806901179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806901180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742018_1194 (size=14794) 2024-11-17T01:27:21,202 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:21,207 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117994da934157a4614acf010097cd2de88_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117994da934157a4614acf010097cd2de88_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:21,208 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/8708d720884649afb038d4479780b41c, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:21,209 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/8708d720884649afb038d4479780b41c is 175, key is test_row_0/A:col10/1731806840036/Put/seqid=0 2024-11-17T01:27:21,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742019_1195 (size=39749) 2024-11-17T01:27:21,225 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/8708d720884649afb038d4479780b41c 2024-11-17T01:27:21,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/3f5951a57a394ff1a843764aa23dfe51 is 50, key is 
test_row_0/B:col10/1731806840036/Put/seqid=0 2024-11-17T01:27:21,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742020_1196 (size=12151) 2024-11-17T01:27:21,283 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806901281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806901285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806901285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806901285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,287 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806901285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-17T01:27:21,320 INFO [Thread-732 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-17T01:27:21,321 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:21,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-17T01:27:21,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-17T01:27:21,323 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:21,323 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:21,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:21,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-17T01:27:21,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-17T01:27:21,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:21,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:21,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:21,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:21,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:21,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:21,486 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806901485, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806901487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806901487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806901487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,490 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806901489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-17T01:27:21,626 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-17T01:27:21,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:21,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:21,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:21,627 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] handler.RSProcedureHandler(58): pid=50 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:21,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=50 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:21,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=50 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:21,645 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/3f5951a57a394ff1a843764aa23dfe51 2024-11-17T01:27:21,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/8b52b8b1d370467082e2ce6ce312d18b is 50, key is test_row_0/C:col10/1731806840036/Put/seqid=0 2024-11-17T01:27:21,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742021_1197 (size=12151) 2024-11-17T01:27:21,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/8b52b8b1d370467082e2ce6ce312d18b 2024-11-17T01:27:21,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/8708d720884649afb038d4479780b41c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8708d720884649afb038d4479780b41c 2024-11-17T01:27:21,668 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8708d720884649afb038d4479780b41c, entries=200, sequenceid=176, filesize=38.8 K 2024-11-17T01:27:21,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/3f5951a57a394ff1a843764aa23dfe51 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/3f5951a57a394ff1a843764aa23dfe51 2024-11-17T01:27:21,674 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/3f5951a57a394ff1a843764aa23dfe51, entries=150, sequenceid=176, filesize=11.9 K 2024-11-17T01:27:21,676 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/8b52b8b1d370467082e2ce6ce312d18b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8b52b8b1d370467082e2ce6ce312d18b 2024-11-17T01:27:21,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8b52b8b1d370467082e2ce6ce312d18b, entries=150, sequenceid=176, filesize=11.9 K 2024-11-17T01:27:21,682 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 528ms, sequenceid=176, compaction requested=false 2024-11-17T01:27:21,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:21,778 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-17T01:27:21,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:21,779 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-17T01:27:21,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:21,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:21,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:21,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:21,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:21,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:21,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f35a5725d3be4b83b7eb23f1308ce6e2_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806841178/Put/seqid=0 2024-11-17T01:27:21,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:21,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:21,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806901798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806901800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806901800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806901801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806901801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742022_1198 (size=12304) 2024-11-17T01:27:21,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806901902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806901904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806901905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,908 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:21,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806901906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806901905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:21,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-17T01:27:22,105 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806902104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806902109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806902109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806902110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806902110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:22,213 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f35a5725d3be4b83b7eb23f1308ce6e2_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f35a5725d3be4b83b7eb23f1308ce6e2_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:22,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/8c0f7a31d75147b2a2bdef2a2aedaef8, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:22,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/8c0f7a31d75147b2a2bdef2a2aedaef8 is 175, key is test_row_0/A:col10/1731806841178/Put/seqid=0 2024-11-17T01:27:22,218 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742023_1199 (size=31105) 2024-11-17T01:27:22,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806902408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806902413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806902413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806902414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806902414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-17T01:27:22,619 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/8c0f7a31d75147b2a2bdef2a2aedaef8 2024-11-17T01:27:22,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/dd8e2109b72a4bef991d4f7edb2278f9 is 50, key is test_row_0/B:col10/1731806841178/Put/seqid=0 2024-11-17T01:27:22,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742024_1200 (size=12151) 2024-11-17T01:27:22,634 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/dd8e2109b72a4bef991d4f7edb2278f9 2024-11-17T01:27:22,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/e0694e5e28ad48d8be05b045cc1b4fd3 is 50, key is test_row_0/C:col10/1731806841178/Put/seqid=0 2024-11-17T01:27:22,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742025_1201 (size=12151) 2024-11-17T01:27:22,646 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=198 (bloomFilter=true), 
to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/e0694e5e28ad48d8be05b045cc1b4fd3 2024-11-17T01:27:22,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/8c0f7a31d75147b2a2bdef2a2aedaef8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8c0f7a31d75147b2a2bdef2a2aedaef8 2024-11-17T01:27:22,654 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8c0f7a31d75147b2a2bdef2a2aedaef8, entries=150, sequenceid=198, filesize=30.4 K 2024-11-17T01:27:22,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/dd8e2109b72a4bef991d4f7edb2278f9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/dd8e2109b72a4bef991d4f7edb2278f9 2024-11-17T01:27:22,659 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/dd8e2109b72a4bef991d4f7edb2278f9, entries=150, sequenceid=198, filesize=11.9 K 2024-11-17T01:27:22,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/e0694e5e28ad48d8be05b045cc1b4fd3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/e0694e5e28ad48d8be05b045cc1b4fd3 2024-11-17T01:27:22,666 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/e0694e5e28ad48d8be05b045cc1b4fd3, entries=150, sequenceid=198, filesize=11.9 K 2024-11-17T01:27:22,667 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 889ms, sequenceid=198, compaction requested=true 2024-11-17T01:27:22,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 
2024-11-17T01:27:22,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:22,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-17T01:27:22,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-17T01:27:22,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-17T01:27:22,673 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3460 sec 2024-11-17T01:27:22,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.3520 sec 2024-11-17T01:27:22,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:22,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-17T01:27:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:22,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:22,927 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f37c3b649d0b4ef1849c61b54eae2e62_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806842918/Put/seqid=0 2024-11-17T01:27:22,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742026_1202 (size=14794) 2024-11-17T01:27:22,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806902932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,936 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:22,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806902932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806902933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806902933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,940 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f37c3b649d0b4ef1849c61b54eae2e62_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f37c3b649d0b4ef1849c61b54eae2e62_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:22,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:22,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806902938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:22,942 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/e660d5ddf7ab4747a6d530c7d22a887f, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:22,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/e660d5ddf7ab4747a6d530c7d22a887f is 175, key is test_row_0/A:col10/1731806842918/Put/seqid=0 2024-11-17T01:27:22,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742027_1203 (size=39749) 2024-11-17T01:27:23,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806903038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806903038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,040 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806903038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,040 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806903038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,045 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806903042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806903241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,244 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806903241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806903242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806903242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,247 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806903246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,347 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/e660d5ddf7ab4747a6d530c7d22a887f 2024-11-17T01:27:23,354 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/5298d74de42f472cb7c29c8ce4a1a1cc is 50, key is test_row_0/B:col10/1731806842918/Put/seqid=0 2024-11-17T01:27:23,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742028_1204 (size=12151) 2024-11-17T01:27:23,357 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/5298d74de42f472cb7c29c8ce4a1a1cc 2024-11-17T01:27:23,364 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/851f4292866543d19a1b710eee45f511 is 50, key is test_row_0/C:col10/1731806842918/Put/seqid=0 2024-11-17T01:27:23,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742029_1205 (size=12151) 2024-11-17T01:27:23,426 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-17T01:27:23,427 INFO [Thread-732 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-17T01:27:23,428 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:23,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-17T01:27:23,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-17T01:27:23,429 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:23,430 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:23,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:23,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-17T01:27:23,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806903545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,548 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806903546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806903547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806903547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:23,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806903550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,581 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,582 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-17T01:27:23,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:23,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:23,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:23,582 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:23,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:23,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-17T01:27:23,734 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,734 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-17T01:27:23,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:23,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:23,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:23,735 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:23,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:23,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:23,768 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/851f4292866543d19a1b710eee45f511 2024-11-17T01:27:23,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/e660d5ddf7ab4747a6d530c7d22a887f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e660d5ddf7ab4747a6d530c7d22a887f 2024-11-17T01:27:23,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e660d5ddf7ab4747a6d530c7d22a887f, entries=200, sequenceid=214, filesize=38.8 K 2024-11-17T01:27:23,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/5298d74de42f472cb7c29c8ce4a1a1cc as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5298d74de42f472cb7c29c8ce4a1a1cc 2024-11-17T01:27:23,780 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5298d74de42f472cb7c29c8ce4a1a1cc, entries=150, 
sequenceid=214, filesize=11.9 K 2024-11-17T01:27:23,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/851f4292866543d19a1b710eee45f511 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/851f4292866543d19a1b710eee45f511 2024-11-17T01:27:23,786 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/851f4292866543d19a1b710eee45f511, entries=150, sequenceid=214, filesize=11.9 K 2024-11-17T01:27:23,787 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 868ms, sequenceid=214, compaction requested=true 2024-11-17T01:27:23,787 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:23,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:23,787 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:23,787 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:23,787 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:23,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:23,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:23,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:23,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:23,789 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:23,789 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/B is initiating minor compaction (all files) 2024-11-17T01:27:23,789 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/B in 
TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:23,789 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/8d9a433bd04041bfbac12e4fdda7606e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/3f5951a57a394ff1a843764aa23dfe51, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/dd8e2109b72a4bef991d4f7edb2278f9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5298d74de42f472cb7c29c8ce4a1a1cc] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=47.8 K 2024-11-17T01:27:23,790 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d9a433bd04041bfbac12e4fdda7606e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731806839414 2024-11-17T01:27:23,790 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142050 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:23,790 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/A is initiating minor compaction (all files) 2024-11-17T01:27:23,790 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f5951a57a394ff1a843764aa23dfe51, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731806840036 2024-11-17T01:27:23,790 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/A in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:23,790 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1c3b39cc81ad463197f0fbad96efb15f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8708d720884649afb038d4479780b41c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8c0f7a31d75147b2a2bdef2a2aedaef8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e660d5ddf7ab4747a6d530c7d22a887f] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=138.7 K 2024-11-17T01:27:23,790 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:23,790 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting dd8e2109b72a4bef991d4f7edb2278f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1731806841172 2024-11-17T01:27:23,790 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1c3b39cc81ad463197f0fbad96efb15f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8708d720884649afb038d4479780b41c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8c0f7a31d75147b2a2bdef2a2aedaef8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e660d5ddf7ab4747a6d530c7d22a887f] 2024-11-17T01:27:23,791 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c3b39cc81ad463197f0fbad96efb15f, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731806839414 2024-11-17T01:27:23,791 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 5298d74de42f472cb7c29c8ce4a1a1cc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731806841799 2024-11-17T01:27:23,791 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8708d720884649afb038d4479780b41c, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731806840036 2024-11-17T01:27:23,792 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c0f7a31d75147b2a2bdef2a2aedaef8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1731806841172 2024-11-17T01:27:23,792 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting e660d5ddf7ab4747a6d530c7d22a887f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731806841799 2024-11-17T01:27:23,800 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:23,801 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#B#compaction#174 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:23,801 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/e3ca5e7ed64943848cd5af25ed12ee90 is 50, key is test_row_0/B:col10/1731806842918/Put/seqid=0 2024-11-17T01:27:23,804 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411171d5c23bf2a704282a583a9065c4c6fd3_271e8f2d6ca47e8f7f50d0cb275120f4 store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:23,807 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411171d5c23bf2a704282a583a9065c4c6fd3_271e8f2d6ca47e8f7f50d0cb275120f4, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:23,807 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411171d5c23bf2a704282a583a9065c4c6fd3_271e8f2d6ca47e8f7f50d0cb275120f4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:23,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742030_1206 (size=12629) 2024-11-17T01:27:23,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742031_1207 (size=4469) 2024-11-17T01:27:23,886 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:23,887 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-17T01:27:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:23,887 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-17T01:27:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:23,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:23,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:23,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:23,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:23,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:23,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117637583c1ccf74682a230a6a39f880841_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806842930/Put/seqid=0 2024-11-17T01:27:23,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742032_1208 (size=12304) 2024-11-17T01:27:24,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-17T01:27:24,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:24,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:24,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806904061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806904062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806904063, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806904064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806904066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806904167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806904168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806904168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806904168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806904170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,215 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/e3ca5e7ed64943848cd5af25ed12ee90 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/e3ca5e7ed64943848cd5af25ed12ee90 2024-11-17T01:27:24,215 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#A#compaction#175 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:24,216 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/999f5fd9122f4f3986a95a7dd1752351 is 175, key is test_row_0/A:col10/1731806842918/Put/seqid=0 2024-11-17T01:27:24,221 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/B of 271e8f2d6ca47e8f7f50d0cb275120f4 into e3ca5e7ed64943848cd5af25ed12ee90(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:24,221 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:24,221 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/B, priority=12, startTime=1731806843787; duration=0sec 2024-11-17T01:27:24,221 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:24,221 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:B 2024-11-17T01:27:24,221 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:24,222 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:24,222 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/C is initiating minor compaction (all files) 2024-11-17T01:27:24,222 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/C in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:24,223 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/2dad7c71ce23414a88829726661cd87e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8b52b8b1d370467082e2ce6ce312d18b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/e0694e5e28ad48d8be05b045cc1b4fd3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/851f4292866543d19a1b710eee45f511] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=47.8 K 2024-11-17T01:27:24,223 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 2dad7c71ce23414a88829726661cd87e, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1731806839414 2024-11-17T01:27:24,223 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b52b8b1d370467082e2ce6ce312d18b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1731806840036 2024-11-17T01:27:24,224 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e0694e5e28ad48d8be05b045cc1b4fd3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=198, earliestPutTs=1731806841172 2024-11-17T01:27:24,224 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 851f4292866543d19a1b710eee45f511, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731806841799 2024-11-17T01:27:24,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742033_1209 (size=31583) 2024-11-17T01:27:24,233 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#C#compaction#177 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:24,233 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/f0ee315619794b5695d6ddfb86fecc70 is 50, key is test_row_0/C:col10/1731806842918/Put/seqid=0 2024-11-17T01:27:24,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742034_1210 (size=12629) 2024-11-17T01:27:24,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:24,308 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117637583c1ccf74682a230a6a39f880841_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117637583c1ccf74682a230a6a39f880841_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:24,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/15972a0c519c4a02b16a2b2b8e1bd2bb, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:24,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/15972a0c519c4a02b16a2b2b8e1bd2bb is 175, key is test_row_0/A:col10/1731806842930/Put/seqid=0 2024-11-17T01:27:24,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742035_1211 (size=31105) 2024-11-17T01:27:24,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806904370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806904370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806904370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,374 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806904371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806904373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-17T01:27:24,634 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/999f5fd9122f4f3986a95a7dd1752351 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/999f5fd9122f4f3986a95a7dd1752351 2024-11-17T01:27:24,639 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/A of 271e8f2d6ca47e8f7f50d0cb275120f4 into 999f5fd9122f4f3986a95a7dd1752351(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
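The selection logged above ("Exploring compaction algorithm has selected 4 files of size 48946 ... after considering 3 permutations with 3 in ratio") is a size-ratio choice over contiguous store files. The following is an illustrative sketch only, not HBase's ExploringCompactionPolicy itself: a simplified ratio-based pick of contiguous files, with file sizes, the ratio value, and the file cap all assumed for demonstration.

import java.util.ArrayList;
import java.util.List;

/**
 * Sketch of a ratio-based minor-compaction candidate selection, in the spirit of the
 * decisions logged above. Simplified and self-contained; not the actual HBase policy.
 */
public class CompactionSelectionSketch {

    /** Pick the largest contiguous run of files whose sizes stay "in ratio". */
    static List<Long> selectFiles(List<Long> fileSizes, double ratio, int maxFiles) {
        List<Long> best = new ArrayList<>();
        for (int start = 0; start < fileSizes.size(); start++) {
            for (int end = start + 1; end <= Math.min(fileSizes.size(), start + maxFiles); end++) {
                List<Long> candidate = fileSizes.subList(start, end);
                if (candidate.size() > best.size() && inRatio(candidate, ratio)) {
                    best = new ArrayList<>(candidate);
                }
            }
        }
        return best;
    }

    /** Every file must be no larger than ratio * (combined size of the other candidates). */
    static boolean inRatio(List<Long> candidate, double ratio) {
        long total = candidate.stream().mapToLong(Long::longValue).sum();
        return candidate.stream().allMatch(size -> size <= ratio * (total - size));
    }

    public static void main(String[] args) {
        // Sizes loosely mirroring the four ~12 K store files above (values are assumptions).
        List<Long> sizes = List.of(12_527L, 12_151L, 12_151L, 12_117L);
        System.out.println("Selected " + selectFiles(sizes, 1.2, 10).size() + " files for compaction");
    }
}

With four similarly sized files, every file passes the ratio check against the other three, so all four are selected, matching the "4 files ... all files" minor compactions recorded in this run.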
2024-11-17T01:27:24,640 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:24,640 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/A, priority=12, startTime=1731806843787; duration=0sec 2024-11-17T01:27:24,640 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:24,640 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:A 2024-11-17T01:27:24,642 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/f0ee315619794b5695d6ddfb86fecc70 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f0ee315619794b5695d6ddfb86fecc70 2024-11-17T01:27:24,646 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/C of 271e8f2d6ca47e8f7f50d0cb275120f4 into f0ee315619794b5695d6ddfb86fecc70(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:24,646 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:24,646 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/C, priority=12, startTime=1731806843788; duration=0sec 2024-11-17T01:27:24,646 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:24,646 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:C 2024-11-17T01:27:24,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806904676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806904676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806904676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806904677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,679 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:24,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806904678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:24,714 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/15972a0c519c4a02b16a2b2b8e1bd2bb 2024-11-17T01:27:24,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/18cac9ac289c47658a30b9cc45b5ede6 is 50, key is test_row_0/B:col10/1731806842930/Put/seqid=0 2024-11-17T01:27:24,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742036_1212 (size=12151) 2024-11-17T01:27:25,127 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/18cac9ac289c47658a30b9cc45b5ede6 2024-11-17T01:27:25,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/abfda3fa50a44587bd0c0735bb932929 is 50, key is 
test_row_0/C:col10/1731806842930/Put/seqid=0 2024-11-17T01:27:25,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742037_1213 (size=12151) 2024-11-17T01:27:25,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:25,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806905179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:25,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:25,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806905179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:25,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:25,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806905181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:25,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:25,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806905182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:25,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:25,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806905184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:25,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-17T01:27:25,541 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/abfda3fa50a44587bd0c0735bb932929 2024-11-17T01:27:25,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/15972a0c519c4a02b16a2b2b8e1bd2bb as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/15972a0c519c4a02b16a2b2b8e1bd2bb 2024-11-17T01:27:25,550 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/15972a0c519c4a02b16a2b2b8e1bd2bb, entries=150, sequenceid=234, filesize=30.4 K 2024-11-17T01:27:25,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/18cac9ac289c47658a30b9cc45b5ede6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/18cac9ac289c47658a30b9cc45b5ede6 2024-11-17T01:27:25,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
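The repeated RegionTooBusyException rejections above ("Over memstore limit=512.0 K") are the region server pushing back while flushes and compactions catch up. A minimal client-side sketch of backing off and retrying such a put is shown below; the table name, row, family, and qualifier are taken from the log, while the value, attempt count, and backoff schedule are assumptions. The stock HBase client typically retries this exception on its own, so an explicit loop like this is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Sketch: retry a put with exponential backoff when the server rejects it with
 * RegionTooBusyException, as seen repeatedly in the log above.
 */
public class TooBusyRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;  // write accepted
                } catch (RegionTooBusyException e) {
                    // Region memstore is above its blocking limit; wait for a flush to complete.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}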
2024-11-17T01:27:25,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,555 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/18cac9ac289c47658a30b9cc45b5ede6, entries=150, sequenceid=234, filesize=11.9 K 2024-11-17T01:27:25,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/abfda3fa50a44587bd0c0735bb932929 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/abfda3fa50a44587bd0c0735bb932929 2024-11-17T01:27:25,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,560 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/abfda3fa50a44587bd0c0735bb932929, entries=150, sequenceid=234, filesize=11.9 K 2024-11-17T01:27:25,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,561 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 1673ms, sequenceid=234, compaction requested=false 2024-11-17T01:27:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:25,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
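The 512.0 K blocking limit quoted in the rejections, and the flush of ~114 KB completed here, are both governed by the per-region memstore settings. The sketch below is an assumption-labeled illustration of how such a limit can arise from the flush size and block multiplier (128 KB x 4 = 512 KB); the actual configuration used by this test run is not shown in the log, so the specific values here are guesses chosen only to reproduce the reported limit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

/**
 * Sketch: memstore sizing knobs that would yield the 512 KB blocking limit seen above.
 * Values are assumptions for illustration, not the test's real configuration.
 */
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);   // flush at 128 KB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // block writes at 4x the flush size
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("Writes blocked once the region memstore exceeds " + blockingLimit + " bytes");
    }
}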
2024-11-17T01:27:25,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-17T01:27:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-17T01:27:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,565 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-17T01:27:25,565 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1320 sec 2024-11-17T01:27:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,566 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 2.1370 sec 2024-11-17T01:27:25,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... identical DEBUG entries from storefiletracker.StoreFileTrackerFactory(122) — "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" — repeat for RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 37721 between 2024-11-17T01:27:25,922 and 2024-11-17T01:27:25,980; duplicate entries trimmed ...]
2024-11-17T01:27:25,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:25,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
2024-11-17T01:27:26,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0|1|2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (this DEBUG entry repeats continuously across handlers 0, 1, and 2 from 2024-11-17T01:27:26,017 through 2024-11-17T01:27:26,076; duplicate entries collapsed)
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:26,192 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-17T01:27:26,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-17T01:27:26,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:26,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:26,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:26,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:26,195 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:26,196 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:26,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f3694aafd4cf45e19fa0b417ff728e11_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806844065/Put/seqid=0 2024-11-17T01:27:26,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806906217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806906217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806906222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,226 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806906222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806906223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742038_1214 (size=14794) 2024-11-17T01:27:26,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806906324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806906326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,327 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806906327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806906329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806906329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806906528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806906528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,529 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806906528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806906531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806906532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,631 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:26,638 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f3694aafd4cf45e19fa0b417ff728e11_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f3694aafd4cf45e19fa0b417ff728e11_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:26,639 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/74e394ab44ee44f098e32111c58f9e43, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:26,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/74e394ab44ee44f098e32111c58f9e43 is 175, key is test_row_0/A:col10/1731806844065/Put/seqid=0 2024-11-17T01:27:26,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742039_1215 (size=39749) 2024-11-17T01:27:26,643 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/74e394ab44ee44f098e32111c58f9e43 2024-11-17T01:27:26,653 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/a752c8738f454d4e8017d223e14b0985 is 50, key is test_row_0/B:col10/1731806844065/Put/seqid=0 2024-11-17T01:27:26,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742040_1216 
(size=12151) 2024-11-17T01:27:26,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/a752c8738f454d4e8017d223e14b0985 2024-11-17T01:27:26,687 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/48a3d2e8e3ad483b8a57b892768070de is 50, key is test_row_0/C:col10/1731806844065/Put/seqid=0 2024-11-17T01:27:26,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742041_1217 (size=12151) 2024-11-17T01:27:26,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/48a3d2e8e3ad483b8a57b892768070de 2024-11-17T01:27:26,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/74e394ab44ee44f098e32111c58f9e43 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/74e394ab44ee44f098e32111c58f9e43 2024-11-17T01:27:26,710 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/74e394ab44ee44f098e32111c58f9e43, entries=200, sequenceid=255, filesize=38.8 K 2024-11-17T01:27:26,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/a752c8738f454d4e8017d223e14b0985 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a752c8738f454d4e8017d223e14b0985 2024-11-17T01:27:26,716 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a752c8738f454d4e8017d223e14b0985, entries=150, sequenceid=255, filesize=11.9 K 2024-11-17T01:27:26,717 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/48a3d2e8e3ad483b8a57b892768070de as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/48a3d2e8e3ad483b8a57b892768070de 2024-11-17T01:27:26,721 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/48a3d2e8e3ad483b8a57b892768070de, entries=150, sequenceid=255, filesize=11.9 K 2024-11-17T01:27:26,722 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 530ms, sequenceid=255, compaction requested=true 2024-11-17T01:27:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:26,723 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:26,723 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:26,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:26,724 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:26,724 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102437 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:26,724 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/B is initiating minor compaction (all files) 2024-11-17T01:27:26,724 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/A is initiating minor compaction (all files) 2024-11-17T01:27:26,724 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/B in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:26,724 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/A in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:26,724 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/999f5fd9122f4f3986a95a7dd1752351, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/15972a0c519c4a02b16a2b2b8e1bd2bb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/74e394ab44ee44f098e32111c58f9e43] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=100.0 K 2024-11-17T01:27:26,724 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/e3ca5e7ed64943848cd5af25ed12ee90, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/18cac9ac289c47658a30b9cc45b5ede6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a752c8738f454d4e8017d223e14b0985] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=36.1 K 2024-11-17T01:27:26,725 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:26,725 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/999f5fd9122f4f3986a95a7dd1752351, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/15972a0c519c4a02b16a2b2b8e1bd2bb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/74e394ab44ee44f098e32111c58f9e43] 2024-11-17T01:27:26,726 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e3ca5e7ed64943848cd5af25ed12ee90, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731806841799 2024-11-17T01:27:26,726 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 999f5fd9122f4f3986a95a7dd1752351, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731806841799 2024-11-17T01:27:26,726 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 18cac9ac289c47658a30b9cc45b5ede6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731806842930 2024-11-17T01:27:26,726 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15972a0c519c4a02b16a2b2b8e1bd2bb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731806842930 2024-11-17T01:27:26,726 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a752c8738f454d4e8017d223e14b0985, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1731806844065 2024-11-17T01:27:26,726 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74e394ab44ee44f098e32111c58f9e43, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1731806844061 2024-11-17T01:27:26,734 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:26,735 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#B#compaction#183 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:26,736 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/049da9e9548c4c8280eb06b903eee690 is 50, key is test_row_0/B:col10/1731806844065/Put/seqid=0 2024-11-17T01:27:26,739 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241117f90230e56ee342ffaa19c9e32e93bcf6_271e8f2d6ca47e8f7f50d0cb275120f4 store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:26,741 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241117f90230e56ee342ffaa19c9e32e93bcf6_271e8f2d6ca47e8f7f50d0cb275120f4, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:26,741 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f90230e56ee342ffaa19c9e32e93bcf6_271e8f2d6ca47e8f7f50d0cb275120f4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:26,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742042_1218 (size=12731) 2024-11-17T01:27:26,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742043_1219 (size=4469) 2024-11-17T01:27:26,750 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#A#compaction#184 average throughput is 1.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:26,750 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/27afb77ad1a145ffb3a989b28ba7805f is 175, key is test_row_0/A:col10/1731806844065/Put/seqid=0 2024-11-17T01:27:26,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742044_1220 (size=31685) 2024-11-17T01:27:26,758 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/049da9e9548c4c8280eb06b903eee690 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/049da9e9548c4c8280eb06b903eee690 2024-11-17T01:27:26,761 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/27afb77ad1a145ffb3a989b28ba7805f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/27afb77ad1a145ffb3a989b28ba7805f 2024-11-17T01:27:26,768 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/B of 271e8f2d6ca47e8f7f50d0cb275120f4 into 049da9e9548c4c8280eb06b903eee690(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:26,768 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:26,768 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/B, priority=13, startTime=1731806846723; duration=0sec 2024-11-17T01:27:26,768 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:26,768 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:B 2024-11-17T01:27:26,768 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:26,769 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:26,769 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/C is initiating minor compaction (all files) 2024-11-17T01:27:26,769 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/C in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:26,770 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f0ee315619794b5695d6ddfb86fecc70, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/abfda3fa50a44587bd0c0735bb932929, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/48a3d2e8e3ad483b8a57b892768070de] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=36.1 K 2024-11-17T01:27:26,770 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/A of 271e8f2d6ca47e8f7f50d0cb275120f4 into 27afb77ad1a145ffb3a989b28ba7805f(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:26,770 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:26,770 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/A, priority=13, startTime=1731806846723; duration=0sec 2024-11-17T01:27:26,770 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:26,770 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:A 2024-11-17T01:27:26,770 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f0ee315619794b5695d6ddfb86fecc70, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731806841799 2024-11-17T01:27:26,770 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting abfda3fa50a44587bd0c0735bb932929, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731806842930 2024-11-17T01:27:26,771 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 48a3d2e8e3ad483b8a57b892768070de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1731806844065 2024-11-17T01:27:26,779 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#C#compaction#185 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:26,779 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/fd42316f68c844cca66e19441a300e5f is 50, key is test_row_0/C:col10/1731806844065/Put/seqid=0 2024-11-17T01:27:26,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742045_1221 (size=12731) 2024-11-17T01:27:26,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:26,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-17T01:27:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:26,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:26,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:26,842 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117437e874a709f4ad3b0250ef58505861f_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806846831/Put/seqid=0 2024-11-17T01:27:26,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806906842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,849 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806906842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806906846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806906846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806906849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742046_1222 (size=14994) 2024-11-17T01:27:26,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806906950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,950 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806906950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806906952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806906952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:26,954 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:26,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806906952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806907151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806907152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806907155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806907155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806907156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,188 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/fd42316f68c844cca66e19441a300e5f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/fd42316f68c844cca66e19441a300e5f 2024-11-17T01:27:27,193 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/C of 271e8f2d6ca47e8f7f50d0cb275120f4 into fd42316f68c844cca66e19441a300e5f(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:27,193 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:27,193 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/C, priority=13, startTime=1731806846723; duration=0sec 2024-11-17T01:27:27,194 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:27,194 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:C 2024-11-17T01:27:27,257 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:27,260 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117437e874a709f4ad3b0250ef58505861f_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117437e874a709f4ad3b0250ef58505861f_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:27,261 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b77d8f7defdd4c39bcadc53df9ce44b0, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:27,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b77d8f7defdd4c39bcadc53df9ce44b0 is 175, key is test_row_0/A:col10/1731806846831/Put/seqid=0 2024-11-17T01:27:27,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742047_1223 (size=39949) 2024-11-17T01:27:27,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806907455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806907457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806907457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806907459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806907459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-17T01:27:27,534 INFO [Thread-732 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-17T01:27:27,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:27,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-17T01:27:27,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-17T01:27:27,536 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:27,537 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:27,537 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:27,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=53 2024-11-17T01:27:27,666 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b77d8f7defdd4c39bcadc53df9ce44b0 2024-11-17T01:27:27,672 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/a833bb5275bb4b80bfac79c60c8299fd is 50, key is test_row_0/B:col10/1731806846831/Put/seqid=0 2024-11-17T01:27:27,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742048_1224 (size=12301) 2024-11-17T01:27:27,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/a833bb5275bb4b80bfac79c60c8299fd 2024-11-17T01:27:27,683 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/cbb1cb4be2434d368fa8cc658a65cd07 is 50, key is test_row_0/C:col10/1731806846831/Put/seqid=0 2024-11-17T01:27:27,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742049_1225 (size=12301) 2024-11-17T01:27:27,688 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,688 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/cbb1cb4be2434d368fa8cc658a65cd07 2024-11-17T01:27:27,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-17T01:27:27,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:27,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:27,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:27,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:27,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:27,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:27,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b77d8f7defdd4c39bcadc53df9ce44b0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b77d8f7defdd4c39bcadc53df9ce44b0 2024-11-17T01:27:27,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b77d8f7defdd4c39bcadc53df9ce44b0, entries=200, sequenceid=277, filesize=39.0 K 2024-11-17T01:27:27,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/a833bb5275bb4b80bfac79c60c8299fd as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a833bb5275bb4b80bfac79c60c8299fd 2024-11-17T01:27:27,700 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a833bb5275bb4b80bfac79c60c8299fd, entries=150, sequenceid=277, filesize=12.0 K 2024-11-17T01:27:27,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/cbb1cb4be2434d368fa8cc658a65cd07 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/cbb1cb4be2434d368fa8cc658a65cd07 2024-11-17T01:27:27,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/cbb1cb4be2434d368fa8cc658a65cd07, entries=150, sequenceid=277, filesize=12.0 K 2024-11-17T01:27:27,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 873ms, sequenceid=277, compaction requested=false 2024-11-17T01:27:27,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:27,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-17T01:27:27,841 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-17T01:27:27,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:27,841 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-17T01:27:27,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:27,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:27,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:27,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:27,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:27,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:27,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117131d4c7c034d45a48fcbbd7eb196f5b8_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806846844/Put/seqid=0 2024-11-17T01:27:27,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742050_1226 (size=12454) 2024-11-17T01:27:27,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:27,963 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:27,976 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806907971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806907972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806907972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806907973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:27,980 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:27,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806907976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806908077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806908077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806908078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806908078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,081 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806908080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-17T01:27:28,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:28,258 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117131d4c7c034d45a48fcbbd7eb196f5b8_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117131d4c7c034d45a48fcbbd7eb196f5b8_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:28,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/61eea49b0aff423298f938e61b8391a7, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:28,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/61eea49b0aff423298f938e61b8391a7 is 175, key is test_row_0/A:col10/1731806846844/Put/seqid=0 2024-11-17T01:27:28,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742051_1227 (size=31255) 2024-11-17T01:27:28,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806908281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806908281, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,284 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806908282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806908282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806908283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,583 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806908583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806908585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806908586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806908587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:28,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806908587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:28,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-17T01:27:28,671 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=294, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/61eea49b0aff423298f938e61b8391a7 2024-11-17T01:27:28,678 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/58aa9538bb6f4ae1bf93f438b9949b7c is 50, key is test_row_0/B:col10/1731806846844/Put/seqid=0 2024-11-17T01:27:28,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742052_1228 (size=12301) 2024-11-17T01:27:29,082 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/58aa9538bb6f4ae1bf93f438b9949b7c 2024-11-17T01:27:29,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:29,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806909087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:29,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:29,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806909089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:29,092 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:29,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806909089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:29,095 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:29,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806909094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:29,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:29,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806909094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:29,123 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/dc895e4e096e46a1a250e3af789076d2 is 50, key is test_row_0/C:col10/1731806846844/Put/seqid=0 2024-11-17T01:27:29,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742053_1229 (size=12301) 2024-11-17T01:27:29,527 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/dc895e4e096e46a1a250e3af789076d2 2024-11-17T01:27:29,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/61eea49b0aff423298f938e61b8391a7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/61eea49b0aff423298f938e61b8391a7 2024-11-17T01:27:29,534 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/61eea49b0aff423298f938e61b8391a7, entries=150, sequenceid=294, filesize=30.5 K 2024-11-17T01:27:29,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/58aa9538bb6f4ae1bf93f438b9949b7c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/58aa9538bb6f4ae1bf93f438b9949b7c 2024-11-17T01:27:29,539 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/58aa9538bb6f4ae1bf93f438b9949b7c, entries=150, sequenceid=294, filesize=12.0 K 2024-11-17T01:27:29,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/dc895e4e096e46a1a250e3af789076d2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/dc895e4e096e46a1a250e3af789076d2 2024-11-17T01:27:29,551 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/dc895e4e096e46a1a250e3af789076d2, entries=150, sequenceid=294, filesize=12.0 K 2024-11-17T01:27:29,552 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 1711ms, sequenceid=294, compaction requested=true 2024-11-17T01:27:29,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:29,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:29,552 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-17T01:27:29,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-17T01:27:29,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-17T01:27:29,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0160 sec 2024-11-17T01:27:29,555 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 2.0190 sec 2024-11-17T01:27:29,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-17T01:27:29,640 INFO [Thread-732 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-17T01:27:29,641 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:29,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-17T01:27:29,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-17T01:27:29,642 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:29,642 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:29,642 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:29,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-17T01:27:29,794 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:29,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-17T01:27:29,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:29,794 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-17T01:27:29,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:29,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:29,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:29,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:29,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:29,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:29,801 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411178375868b46c343f48d0abde3c20908ca_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806847972/Put/seqid=0 2024-11-17T01:27:29,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742054_1230 (size=12454) 2024-11-17T01:27:29,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-17T01:27:29,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T01:27:30,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:30,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:30,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806910106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806910106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806910107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806910107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,111 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806910108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:30,209 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411178375868b46c343f48d0abde3c20908ca_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411178375868b46c343f48d0abde3c20908ca_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:30,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/fb9713a4d04d421d979b46af4255efd6, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:30,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/fb9713a4d04d421d979b46af4255efd6 is 175, key is test_row_0/A:col10/1731806847972/Put/seqid=0 2024-11-17T01:27:30,213 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806910210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806910210, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806910212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806910212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742055_1231 (size=31255) 2024-11-17T01:27:30,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806910213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,215 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=314, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/fb9713a4d04d421d979b46af4255efd6 2024-11-17T01:27:30,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/25a3b931c218467dafc2f7c9f1efacee is 50, key is test_row_0/B:col10/1731806847972/Put/seqid=0 2024-11-17T01:27:30,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742056_1232 (size=12301) 2024-11-17T01:27:30,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-17T01:27:30,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806910416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,417 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806910416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806910416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806910416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806910419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,626 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/25a3b931c218467dafc2f7c9f1efacee 2024-11-17T01:27:30,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/8fd9e06abaad4a39be624fae1d2d01c9 is 50, key is test_row_0/C:col10/1731806847972/Put/seqid=0 2024-11-17T01:27:30,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742057_1233 (size=12301) 2024-11-17T01:27:30,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806910719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806910719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806910720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806910720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:30,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806910722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:30,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-17T01:27:31,037 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=314 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/8fd9e06abaad4a39be624fae1d2d01c9 2024-11-17T01:27:31,041 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/fb9713a4d04d421d979b46af4255efd6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/fb9713a4d04d421d979b46af4255efd6 2024-11-17T01:27:31,045 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/fb9713a4d04d421d979b46af4255efd6, entries=150, sequenceid=314, filesize=30.5 K 2024-11-17T01:27:31,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/25a3b931c218467dafc2f7c9f1efacee as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/25a3b931c218467dafc2f7c9f1efacee 2024-11-17T01:27:31,049 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/25a3b931c218467dafc2f7c9f1efacee, entries=150, sequenceid=314, filesize=12.0 K 2024-11-17T01:27:31,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/8fd9e06abaad4a39be624fae1d2d01c9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8fd9e06abaad4a39be624fae1d2d01c9 2024-11-17T01:27:31,054 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8fd9e06abaad4a39be624fae1d2d01c9, entries=150, sequenceid=314, filesize=12.0 K 2024-11-17T01:27:31,055 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 1261ms, sequenceid=314, compaction requested=true 2024-11-17T01:27:31,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:31,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:31,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-17T01:27:31,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-17T01:27:31,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-17T01:27:31,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4140 sec 2024-11-17T01:27:31,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 1.4170 sec 2024-11-17T01:27:31,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:31,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-17T01:27:31,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:31,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:31,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:31,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:31,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:31,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:31,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f43ff9dbdcb942b6a509168dce1c7a4c_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806850105/Put/seqid=0 2024-11-17T01:27:31,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742058_1234 (size=14994) 2024-11-17T01:27:31,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806911249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,254 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806911249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806911250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806911250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806911254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806911355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806911355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,358 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806911356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806911356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806911361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,561 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806911560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806911559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806911560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806911560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:31,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806911565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:31,643 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-17T01:27:31,646 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f43ff9dbdcb942b6a509168dce1c7a4c_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f43ff9dbdcb942b6a509168dce1c7a4c_271e8f2d6ca47e8f7f50d0cb275120f4
2024-11-17T01:27:31,647 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/4c8b8fb28a8b4e399e06bb084edf0ad2, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4]
2024-11-17T01:27:31,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/4c8b8fb28a8b4e399e06bb084edf0ad2 is 175, key is test_row_0/A:col10/1731806850105/Put/seqid=0
2024-11-17T01:27:31,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742059_1235 (size=39949)
2024-11-17T01:27:31,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55
2024-11-17T01:27:31,746 INFO [Thread-732 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed
2024-11-17T01:27:31,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-17T01:27:31,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees
2024-11-17T01:27:31,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-11-17T01:27:31,748 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-17T01:27:31,748 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T01:27:31,748 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T01:27:31,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-11-17T01:27:31,867 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806911864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:31,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806911865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806911865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,868 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806911865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:31,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806911871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,899 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:31,900 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-17T01:27:31,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:31,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:31,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:31,900 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:31,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:31,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:32,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-17T01:27:32,052 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:32,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-17T01:27:32,052 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=331, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/4c8b8fb28a8b4e399e06bb084edf0ad2 2024-11-17T01:27:32,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:32,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:32,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:32,052 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:32,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:32,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:32,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/1258a8c94c3d47ee8a015ff1d88872a9 is 50, key is test_row_0/B:col10/1731806850105/Put/seqid=0 2024-11-17T01:27:32,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742060_1236 (size=12301) 2024-11-17T01:27:32,064 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/1258a8c94c3d47ee8a015ff1d88872a9 2024-11-17T01:27:32,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/643a6f39c6da4ecca13d75a632df0dad is 50, key is test_row_0/C:col10/1731806850105/Put/seqid=0 2024-11-17T01:27:32,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742061_1237 (size=12301) 2024-11-17T01:27:32,204 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:32,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-17T01:27:32,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:32,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:32,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:32,205 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:32,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:32,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:32,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-17T01:27:32,357 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:32,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-17T01:27:32,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:32,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:32,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:32,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:32,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:32,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:32,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:32,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806912370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:32,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:32,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806912371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:32,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:32,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806912371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:32,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:32,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806912373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:32,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:32,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806912375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:32,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=331 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/643a6f39c6da4ecca13d75a632df0dad
2024-11-17T01:27:32,479 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/4c8b8fb28a8b4e399e06bb084edf0ad2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/4c8b8fb28a8b4e399e06bb084edf0ad2
2024-11-17T01:27:32,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/4c8b8fb28a8b4e399e06bb084edf0ad2, entries=200, sequenceid=331, filesize=39.0 K
2024-11-17T01:27:32,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/1258a8c94c3d47ee8a015ff1d88872a9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/1258a8c94c3d47ee8a015ff1d88872a9
2024-11-17T01:27:32,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/1258a8c94c3d47ee8a015ff1d88872a9, entries=150, sequenceid=331, filesize=12.0 K
2024-11-17T01:27:32,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/643a6f39c6da4ecca13d75a632df0dad as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/643a6f39c6da4ecca13d75a632df0dad
2024-11-17T01:27:32,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/643a6f39c6da4ecca13d75a632df0dad, entries=150, sequenceid=331, filesize=12.0 K
2024-11-17T01:27:32,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 1267ms, sequenceid=331, compaction requested=true
2024-11-17T01:27:32,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4:
2024-11-17T01:27:32,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:A, priority=-2147483648, current under compaction store size is 1
2024-11-17T01:27:32,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-17T01:27:32,494 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking
2024-11-17T01:27:32,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:B, priority=-2147483648, current under compaction store size is 2
2024-11-17T01:27:32,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-17T01:27:32,494 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking
2024-11-17T01:27:32,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:C, priority=-2147483648, current under compaction store size is 3
2024-11-17T01:27:32,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-17T01:27:32,495 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61935 starting at candidate #0 after considering 6 permutations with 6 in ratio
2024-11-17T01:27:32,495 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 174093 starting at candidate #0 after considering 6 permutations with 6 in ratio
2024-11-17T01:27:32,495 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/B is initiating minor compaction (all files)
2024-11-17T01:27:32,495 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/A is initiating minor compaction (all files)
2024-11-17T01:27:32,495 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/B in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:32,495 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/A in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:32,496 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/049da9e9548c4c8280eb06b903eee690, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a833bb5275bb4b80bfac79c60c8299fd, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/58aa9538bb6f4ae1bf93f438b9949b7c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/25a3b931c218467dafc2f7c9f1efacee, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/1258a8c94c3d47ee8a015ff1d88872a9] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=60.5 K
2024-11-17T01:27:32,496 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/27afb77ad1a145ffb3a989b28ba7805f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b77d8f7defdd4c39bcadc53df9ce44b0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/61eea49b0aff423298f938e61b8391a7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/fb9713a4d04d421d979b46af4255efd6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/4c8b8fb28a8b4e399e06bb084edf0ad2] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=170.0 K
2024-11-17T01:27:32,496 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:32,496 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/27afb77ad1a145ffb3a989b28ba7805f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b77d8f7defdd4c39bcadc53df9ce44b0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/61eea49b0aff423298f938e61b8391a7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/fb9713a4d04d421d979b46af4255efd6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/4c8b8fb28a8b4e399e06bb084edf0ad2]
2024-11-17T01:27:32,496 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 049da9e9548c4c8280eb06b903eee690, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1731806844065
2024-11-17T01:27:32,496 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 27afb77ad1a145ffb3a989b28ba7805f, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1731806844065
2024-11-17T01:27:32,496 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a833bb5275bb4b80bfac79c60c8299fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1731806846220
2024-11-17T01:27:32,496 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting b77d8f7defdd4c39bcadc53df9ce44b0, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1731806846220
2024-11-17T01:27:32,496 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 58aa9538bb6f4ae1bf93f438b9949b7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1731806846844
2024-11-17T01:27:32,496 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61eea49b0aff423298f938e61b8391a7, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1731806846844
2024-11-17T01:27:32,497 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 25a3b931c218467dafc2f7c9f1efacee, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1731806847969
2024-11-17T01:27:32,497 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb9713a4d04d421d979b46af4255efd6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1731806847969
2024-11-17T01:27:32,497 DEBUG
[RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c8b8fb28a8b4e399e06bb084edf0ad2, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1731806850105 2024-11-17T01:27:32,497 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 1258a8c94c3d47ee8a015ff1d88872a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1731806850105 2024-11-17T01:27:32,504 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:32,506 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#B#compaction#198 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:32,506 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/6091ad0e7f894a599b012982fea26d18 is 50, key is test_row_0/B:col10/1731806850105/Put/seqid=0 2024-11-17T01:27:32,509 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:32,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-17T01:27:32,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
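The "Exploring compaction algorithm has selected 5 files ... after considering 6 permutations" lines above come from HBase's exploring compaction policy, which only accepts a candidate set when no single store file dominates the rest of the set. Below is a minimal, hypothetical sketch of that size-ratio test, assuming the common default ratio of 1.2; the class and method names are invented for illustration and this is not the actual HBase code.

```java
// Hypothetical illustration only; names invented, not the HBase implementation.
public final class RatioCheckSketch {

  /** A candidate set passes when no file is larger than ratio times the sum of the others. */
  static boolean filesInRatio(long[] fileSizes, double ratio) {
    long total = 0;
    for (long size : fileSizes) {
      total += size;
    }
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates the set, reject this permutation
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Sizes roughly matching the five B-family store files selected above (bytes).
    long[] candidate = {12700, 12300, 12300, 12300, 12300};
    System.out.println(filesInRatio(candidate, 1.2)); // prints true
  }
}
```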
2024-11-17T01:27:32,510 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-17T01:27:32,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:32,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:32,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:32,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:32,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:32,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:32,511 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241117ad45399087bd448fa585ff1689af39bc_271e8f2d6ca47e8f7f50d0cb275120f4 store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:32,513 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241117ad45399087bd448fa585ff1689af39bc_271e8f2d6ca47e8f7f50d0cb275120f4, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:32,513 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117ad45399087bd448fa585ff1689af39bc_271e8f2d6ca47e8f7f50d0cb275120f4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:32,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742062_1238 (size=13051) 2024-11-17T01:27:32,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742063_1239 (size=4469) 2024-11-17T01:27:32,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411172bec423d3e684a71832ff427668315b1_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806851253/Put/seqid=0 2024-11-17T01:27:32,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:40381 is added to blk_1073742064_1240 (size=12454) 2024-11-17T01:27:32,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-17T01:27:32,920 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/6091ad0e7f894a599b012982fea26d18 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6091ad0e7f894a599b012982fea26d18 2024-11-17T01:27:32,921 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#A#compaction#199 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:32,921 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/7a65ff0f19854eec9f5be66654f1369e is 175, key is test_row_0/A:col10/1731806850105/Put/seqid=0 2024-11-17T01:27:32,924 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/B of 271e8f2d6ca47e8f7f50d0cb275120f4 into 6091ad0e7f894a599b012982fea26d18(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
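The "Committing ... .tmp/B/6091ad0e7f894a599b012982fea26d18 as ... B/6091ad0e7f894a599b012982fea26d18" line above shows the write-to-temp-then-rename pattern used when compaction and flush output is promoted into the store directory. The sketch below illustrates that commit step under the assumption that it reduces to a single HDFS rename; the class name and any paths passed to it are hypothetical, and only FileSystem.rename is a real Hadoop call.

```java
import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical sketch of the ".tmp then rename" commit step; illustration only.
public final class TmpCommitSketch {

  /** Moves a fully written temporary file into its final store location in one rename. */
  public static Path commit(FileSystem fs, Path tmpFile, Path finalFile) throws IOException {
    // The file is written completely under the region's .tmp directory first,
    // so readers never observe a partially written file in the store directory.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("Failed to commit " + tmpFile + " to " + finalFile);
    }
    return finalFile;
  }
}
```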
2024-11-17T01:27:32,924 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:32,924 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/B, priority=11, startTime=1731806852494; duration=0sec 2024-11-17T01:27:32,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742065_1241 (size=32005) 2024-11-17T01:27:32,925 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:32,925 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:B 2024-11-17T01:27:32,925 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-17T01:27:32,926 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61935 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-17T01:27:32,926 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/C is initiating minor compaction (all files) 2024-11-17T01:27:32,926 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/C in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
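All of the compactions in this run are system-requested, queued by MemStoreFlusher.0 after each flush. For completeness, a client can also ask for a compaction of a single column family through the Admin API; the hedged sketch below assumes a default client configuration on the classpath and uses the table and family names from this log purely as an example.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: request a minor compaction of the C family from a client.
public final class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Queues a compaction request for one column family of the test table.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("C"));
    }
  }
}
```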
2024-11-17T01:27:32,926 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/fd42316f68c844cca66e19441a300e5f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/cbb1cb4be2434d368fa8cc658a65cd07, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/dc895e4e096e46a1a250e3af789076d2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8fd9e06abaad4a39be624fae1d2d01c9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/643a6f39c6da4ecca13d75a632df0dad] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=60.5 K 2024-11-17T01:27:32,927 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting fd42316f68c844cca66e19441a300e5f, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1731806844065 2024-11-17T01:27:32,927 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting cbb1cb4be2434d368fa8cc658a65cd07, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1731806846220 2024-11-17T01:27:32,927 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting dc895e4e096e46a1a250e3af789076d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1731806846844 2024-11-17T01:27:32,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:32,928 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fd9e06abaad4a39be624fae1d2d01c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=314, earliestPutTs=1731806847969 2024-11-17T01:27:32,928 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 643a6f39c6da4ecca13d75a632df0dad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1731806850105 2024-11-17T01:27:32,931 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411172bec423d3e684a71832ff427668315b1_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172bec423d3e684a71832ff427668315b1_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:32,932 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b225e4d89759424db8f25050937c807f, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:32,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b225e4d89759424db8f25050937c807f is 175, key is test_row_0/A:col10/1731806851253/Put/seqid=0 2024-11-17T01:27:32,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742066_1242 (size=31255) 2024-11-17T01:27:32,940 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#C#compaction#201 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:32,941 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/319990cbe6244043b6bf47f8ca028afa is 50, key is test_row_0/C:col10/1731806850105/Put/seqid=0 2024-11-17T01:27:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742067_1243 (size=13051) 2024-11-17T01:27:33,329 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/7a65ff0f19854eec9f5be66654f1369e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7a65ff0f19854eec9f5be66654f1369e 2024-11-17T01:27:33,333 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/A of 271e8f2d6ca47e8f7f50d0cb275120f4 into 7a65ff0f19854eec9f5be66654f1369e(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:33,334 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:33,334 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/A, priority=11, startTime=1731806852494; duration=0sec 2024-11-17T01:27:33,334 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:33,334 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:A 2024-11-17T01:27:33,336 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=350, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b225e4d89759424db8f25050937c807f 2024-11-17T01:27:33,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/0c2a31970fe14890af0b2acac38187e9 is 50, key is test_row_0/B:col10/1731806851253/Put/seqid=0 2024-11-17T01:27:33,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742068_1244 (size=12301) 2024-11-17T01:27:33,350 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/319990cbe6244043b6bf47f8ca028afa as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/319990cbe6244043b6bf47f8ca028afa 2024-11-17T01:27:33,354 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/C of 271e8f2d6ca47e8f7f50d0cb275120f4 into 319990cbe6244043b6bf47f8ca028afa(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
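The RegionTooBusyException warnings that follow are raised because the region's memstore has grown past its blocking limit, reported here as limit=512.0 K, which is the per-region flush size multiplied by the blocking multiplier. The snippet below is an illustrative sketch of the two configuration keys involved; the values shown are the usual defaults (128 MB and 4), not the much smaller values this test run evidently uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: the two keys behind the memstore blocking limit, with typical defaults.
public final class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    // Writes to a region start failing with RegionTooBusyException once its memstore
    // exceeds flush.size * block.multiplier, until flushes bring it back down.
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit bytes = " + blockingLimit);
  }
}
```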
2024-11-17T01:27:33,354 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:33,354 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/C, priority=11, startTime=1731806852494; duration=0sec 2024-11-17T01:27:33,354 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:33,354 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:C 2024-11-17T01:27:33,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:33,381 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:33,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806913388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806913388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,391 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806913389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806913390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806913390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806913491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806913491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806913492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806913494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806913494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52648 deadline: 1731806913694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52590 deadline: 1731806913694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:33,697 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:33,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52664 deadline: 1731806913697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52594 deadline: 1731806913696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:33,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:33,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:52628 deadline: 1731806913698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:33,749 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/0c2a31970fe14890af0b2acac38187e9
2024-11-17T01:27:33,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/55940190ae0142eba7847d8c40e2e61c is 50, key is test_row_0/C:col10/1731806851253/Put/seqid=0
2024-11-17T01:27:33,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742069_1245 (size=12301)
2024-11-17T01:27:33,763 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/55940190ae0142eba7847d8c40e2e61c
2024-11-17T01:27:33,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b225e4d89759424db8f25050937c807f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b225e4d89759424db8f25050937c807f
2024-11-17T01:27:33,771 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b225e4d89759424db8f25050937c807f, entries=150, sequenceid=350, filesize=30.5 K
2024-11-17T01:27:33,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/0c2a31970fe14890af0b2acac38187e9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/0c2a31970fe14890af0b2acac38187e9
2024-11-17T01:27:33,776 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/0c2a31970fe14890af0b2acac38187e9, entries=150, sequenceid=350, filesize=12.0 K
2024-11-17T01:27:33,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/55940190ae0142eba7847d8c40e2e61c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/55940190ae0142eba7847d8c40e2e61c
2024-11-17T01:27:33,781 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/55940190ae0142eba7847d8c40e2e61c, entries=150, sequenceid=350, filesize=12.0 K
2024-11-17T01:27:33,782 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 1271ms, sequenceid=350, compaction requested=false
2024-11-17T01:27:33,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4:
2024-11-17T01:27:33,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:33,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58
2024-11-17T01:27:33,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=58
2024-11-17T01:27:33,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57
2024-11-17T01:27:33,784 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0350 sec
2024-11-17T01:27:33,785 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 2.0380 sec
2024-11-17T01:27:33,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-11-17T01:27:33,852 INFO [Thread-732 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed
2024-11-17T01:27:33,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-17T01:27:33,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees
2024-11-17T01:27:33,854 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-17T01:27:33,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-17T01:27:33,855 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T01:27:33,855 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T01:27:33,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-17T01:27:34,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 271e8f2d6ca47e8f7f50d0cb275120f4
2024-11-17T01:27:34,000 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB
2024-11-17T01:27:34,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A
2024-11-17T01:27:34,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:27:34,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B
2024-11-17T01:27:34,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:27:34,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C
2024-11-17T01:27:34,001 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:27:34,004 DEBUG [Thread-726 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x176c5c1b to 127.0.0.1:63898
2024-11-17T01:27:34,005 DEBUG [Thread-726 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T01:27:34,005 DEBUG [Thread-733 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x367f47f7 to 127.0.0.1:63898
2024-11-17T01:27:34,005 DEBUG [Thread-733 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T01:27:34,006 DEBUG [Thread-730 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46114993 to 127.0.0.1:63898
2024-11-17T01:27:34,006 DEBUG [Thread-730 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T01:27:34,006 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:34,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-17T01:27:34,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing
2024-11-17T01:27:34,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,007 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,007 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,007 DEBUG [Thread-735 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:63898
2024-11-17T01:27:34,007 DEBUG [Thread-735 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T01:27:34,008 DEBUG [Thread-739 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:63898
2024-11-17T01:27:34,008 DEBUG [Thread-739 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T01:27:34,009 DEBUG [Thread-724 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2931c73e to 127.0.0.1:63898
2024-11-17T01:27:34,009 DEBUG [Thread-728 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x190853fc to 127.0.0.1:63898
2024-11-17T01:27:34,009 DEBUG [Thread-728 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T01:27:34,009 DEBUG [Thread-724 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T01:27:34,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117229565f3aa5849b0b86e28dd6a8ba28b_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806854000/Put/seqid=0
2024-11-17T01:27:34,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,011 DEBUG [Thread-722 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c826820 to 127.0.0.1:63898
2024-11-17T01:27:34,011 DEBUG [Thread-722 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T01:27:34,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742070_1246 (size=17534)
2024-11-17T01:27:34,015 DEBUG [Thread-737 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:63898
2024-11-17T01:27:34,015 DEBUG [Thread-737 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-17T01:27:34,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-17T01:27:34,162 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:34,162 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-17T01:27:34,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing
2024-11-17T01:27:34,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,315 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:34,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-17T01:27:34,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing
2024-11-17T01:27:34,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,316 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,415 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-17T01:27:34,424 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117229565f3aa5849b0b86e28dd6a8ba28b_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117229565f3aa5849b0b86e28dd6a8ba28b_271e8f2d6ca47e8f7f50d0cb275120f4
2024-11-17T01:27:34,425 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/f261698db32842738db27bf4f28e8dd3, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4]
2024-11-17T01:27:34,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/f261698db32842738db27bf4f28e8dd3 is 175, key is test_row_0/A:col10/1731806854000/Put/seqid=0
2024-11-17T01:27:34,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742071_1247 (size=48639)
2024-11-17T01:27:34,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-17T01:27:34,468 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:34,469 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-17T01:27:34,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing
2024-11-17T01:27:34,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,469 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,621 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:34,622 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-17T01:27:34,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing
2024-11-17T01:27:34,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,775 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:34,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-17T01:27:34,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing
2024-11-17T01:27:34,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,833 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/f261698db32842738db27bf4f28e8dd3
2024-11-17T01:27:34,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/a0cbb70f11a84ee7a04e0fa657fa22de is 50, key is test_row_0/B:col10/1731806854000/Put/seqid=0
2024-11-17T01:27:34,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742072_1248 (size=12301)
2024-11-17T01:27:34,931 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:34,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-17T01:27:34,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing
2024-11-17T01:27:34,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:34,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:34,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-17T01:27:35,084 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:35,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-17T01:27:35,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:35,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing
2024-11-17T01:27:35,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:35,086 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:35,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:35,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:35,241 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:35,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-17T01:27:35,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:35,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing
2024-11-17T01:27:35,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.
2024-11-17T01:27:35,244 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:35,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60
java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:35,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/a0cbb70f11a84ee7a04e0fa657fa22de 2024-11-17T01:27:35,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/ffa28bd3041145c282f7acb83fc3fe5b is 50, key is test_row_0/C:col10/1731806854000/Put/seqid=0 2024-11-17T01:27:35,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742073_1249 (size=12301) 2024-11-17T01:27:35,399 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:35,400 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-17T01:27:35,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:35,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
as already flushing 2024-11-17T01:27:35,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:35,401 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:35,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:35,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:35,556 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:35,557 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-17T01:27:35,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:35,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. as already flushing 2024-11-17T01:27:35,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:35,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:35,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:35,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:35,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/ffa28bd3041145c282f7acb83fc3fe5b 2024-11-17T01:27:35,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/f261698db32842738db27bf4f28e8dd3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/f261698db32842738db27bf4f28e8dd3 2024-11-17T01:27:35,692 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/f261698db32842738db27bf4f28e8dd3, entries=250, sequenceid=372, filesize=47.5 K 2024-11-17T01:27:35,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/a0cbb70f11a84ee7a04e0fa657fa22de as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a0cbb70f11a84ee7a04e0fa657fa22de 2024-11-17T01:27:35,696 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a0cbb70f11a84ee7a04e0fa657fa22de, entries=150, 
sequenceid=372, filesize=12.0 K 2024-11-17T01:27:35,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/ffa28bd3041145c282f7acb83fc3fe5b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffa28bd3041145c282f7acb83fc3fe5b 2024-11-17T01:27:35,701 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffa28bd3041145c282f7acb83fc3fe5b, entries=150, sequenceid=372, filesize=12.0 K 2024-11-17T01:27:35,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=53.67 KB/54960 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 1702ms, sequenceid=372, compaction requested=true 2024-11-17T01:27:35,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:35,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:35,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:35,702 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:35,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:35,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:35,702 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:35,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 271e8f2d6ca47e8f7f50d0cb275120f4:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:35,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:35,704 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:35,704 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111899 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:35,704 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] 
regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/B is initiating minor compaction (all files) 2024-11-17T01:27:35,704 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/A is initiating minor compaction (all files) 2024-11-17T01:27:35,704 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/B in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:35,704 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/A in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:35,704 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6091ad0e7f894a599b012982fea26d18, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/0c2a31970fe14890af0b2acac38187e9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a0cbb70f11a84ee7a04e0fa657fa22de] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=36.8 K 2024-11-17T01:27:35,704 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7a65ff0f19854eec9f5be66654f1369e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b225e4d89759424db8f25050937c807f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/f261698db32842738db27bf4f28e8dd3] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=109.3 K 2024-11-17T01:27:35,704 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:35,704 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7a65ff0f19854eec9f5be66654f1369e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b225e4d89759424db8f25050937c807f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/f261698db32842738db27bf4f28e8dd3] 2024-11-17T01:27:35,704 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 6091ad0e7f894a599b012982fea26d18, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1731806850105 2024-11-17T01:27:35,704 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a65ff0f19854eec9f5be66654f1369e, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1731806850105 2024-11-17T01:27:35,705 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c2a31970fe14890af0b2acac38187e9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1731806851240 2024-11-17T01:27:35,705 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting b225e4d89759424db8f25050937c807f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1731806851240 2024-11-17T01:27:35,705 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a0cbb70f11a84ee7a04e0fa657fa22de, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1731806853388 2024-11-17T01:27:35,705 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting f261698db32842738db27bf4f28e8dd3, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1731806853386 2024-11-17T01:27:35,712 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:35,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-17T01:27:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:35,713 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing 271e8f2d6ca47e8f7f50d0cb275120f4 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:27:35,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=A 2024-11-17T01:27:35,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:35,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=B 2024-11-17T01:27:35,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:35,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 271e8f2d6ca47e8f7f50d0cb275120f4, store=C 2024-11-17T01:27:35,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:35,716 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#B#compaction#207 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:35,717 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/b4e6ff61ce68412e88fa633f5fd4b2fa is 50, key is test_row_0/B:col10/1731806854000/Put/seqid=0 2024-11-17T01:27:35,717 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:35,719 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411172d484308ba184159b26635dcd9928682_271e8f2d6ca47e8f7f50d0cb275120f4 store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:35,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742074_1250 (size=13153) 2024-11-17T01:27:35,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117cc01c9f9c5e840cc9d4058440cb25cac_271e8f2d6ca47e8f7f50d0cb275120f4 is 50, key is test_row_0/A:col10/1731806854004/Put/seqid=0 2024-11-17T01:27:35,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742075_1251 (size=12454) 2024-11-17T01:27:35,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:35,740 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117cc01c9f9c5e840cc9d4058440cb25cac_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117cc01c9f9c5e840cc9d4058440cb25cac_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:35,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b09473feedb8478bbf13e24c00042eb2, store: [table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:35,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b09473feedb8478bbf13e24c00042eb2 is 175, key is test_row_0/A:col10/1731806854004/Put/seqid=0 2024-11-17T01:27:35,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742076_1252 (size=31255) 2024-11-17T01:27:35,749 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411172d484308ba184159b26635dcd9928682_271e8f2d6ca47e8f7f50d0cb275120f4, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:35,749 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411172d484308ba184159b26635dcd9928682_271e8f2d6ca47e8f7f50d0cb275120f4 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=271e8f2d6ca47e8f7f50d0cb275120f4] 2024-11-17T01:27:35,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742077_1253 (size=4469) 2024-11-17T01:27:35,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-17T01:27:36,090 DEBUG [master/04f7e7347dc7:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 60ebde759866ffdd749c0e1b676599ae changed from -1.0 to 0.0, refreshing cache 2024-11-17T01:27:36,137 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/b4e6ff61ce68412e88fa633f5fd4b2fa as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/b4e6ff61ce68412e88fa633f5fd4b2fa 2024-11-17T01:27:36,144 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/B of 271e8f2d6ca47e8f7f50d0cb275120f4 into b4e6ff61ce68412e88fa633f5fd4b2fa(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:36,144 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:36,145 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/B, priority=13, startTime=1731806855702; duration=0sec 2024-11-17T01:27:36,145 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:36,145 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:B 2024-11-17T01:27:36,145 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:36,146 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:36,146 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 271e8f2d6ca47e8f7f50d0cb275120f4/C is initiating minor compaction (all files) 2024-11-17T01:27:36,146 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 271e8f2d6ca47e8f7f50d0cb275120f4/C in TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:36,146 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/319990cbe6244043b6bf47f8ca028afa, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/55940190ae0142eba7847d8c40e2e61c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffa28bd3041145c282f7acb83fc3fe5b] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp, totalSize=36.8 K 2024-11-17T01:27:36,147 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 319990cbe6244043b6bf47f8ca028afa, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=331, earliestPutTs=1731806850105 2024-11-17T01:27:36,147 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 55940190ae0142eba7847d8c40e2e61c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1731806851240 2024-11-17T01:27:36,148 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ffa28bd3041145c282f7acb83fc3fe5b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1731806853388 2024-11-17T01:27:36,148 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] 
mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=383, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b09473feedb8478bbf13e24c00042eb2 2024-11-17T01:27:36,154 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#A#compaction#208 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:36,154 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/e63677bc08384bfd90e05750efc71477 is 175, key is test_row_0/A:col10/1731806854000/Put/seqid=0 2024-11-17T01:27:36,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/71670a00248441ab85ebb0ff9ed443be is 50, key is test_row_0/B:col10/1731806854004/Put/seqid=0 2024-11-17T01:27:36,155 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 271e8f2d6ca47e8f7f50d0cb275120f4#C#compaction#211 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:36,155 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/6e32a8918bb441b6863b96aa641ae229 is 50, key is test_row_0/C:col10/1731806854000/Put/seqid=0 2024-11-17T01:27:36,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742079_1255 (size=12301) 2024-11-17T01:27:36,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742078_1254 (size=32107) 2024-11-17T01:27:36,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742080_1256 (size=13153) 2024-11-17T01:27:36,563 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/71670a00248441ab85ebb0ff9ed443be 2024-11-17T01:27:36,572 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/e63677bc08384bfd90e05750efc71477 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e63677bc08384bfd90e05750efc71477 2024-11-17T01:27:36,574 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/6e32a8918bb441b6863b96aa641ae229 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/6e32a8918bb441b6863b96aa641ae229 2024-11-17T01:27:36,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/6cc980b2b2cd4965a0f83765428fabe2 is 50, key is test_row_0/C:col10/1731806854004/Put/seqid=0 2024-11-17T01:27:36,577 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/A of 271e8f2d6ca47e8f7f50d0cb275120f4 into e63677bc08384bfd90e05750efc71477(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:36,577 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:36,577 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/A, priority=13, startTime=1731806855702; duration=0sec 2024-11-17T01:27:36,578 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:36,578 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:A 2024-11-17T01:27:36,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742081_1257 (size=12301) 2024-11-17T01:27:36,578 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 271e8f2d6ca47e8f7f50d0cb275120f4/C of 271e8f2d6ca47e8f7f50d0cb275120f4 into 6e32a8918bb441b6863b96aa641ae229(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:36,578 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:36,578 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4., storeName=271e8f2d6ca47e8f7f50d0cb275120f4/C, priority=13, startTime=1731806855702; duration=0sec 2024-11-17T01:27:36,579 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:36,579 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=383 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/6cc980b2b2cd4965a0f83765428fabe2 2024-11-17T01:27:36,579 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 271e8f2d6ca47e8f7f50d0cb275120f4:C 2024-11-17T01:27:36,582 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/A/b09473feedb8478bbf13e24c00042eb2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b09473feedb8478bbf13e24c00042eb2 2024-11-17T01:27:36,586 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b09473feedb8478bbf13e24c00042eb2, entries=150, sequenceid=383, filesize=30.5 K 2024-11-17T01:27:36,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/B/71670a00248441ab85ebb0ff9ed443be as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/71670a00248441ab85ebb0ff9ed443be 2024-11-17T01:27:36,590 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/71670a00248441ab85ebb0ff9ed443be, entries=150, sequenceid=383, filesize=12.0 K 2024-11-17T01:27:36,591 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/.tmp/C/6cc980b2b2cd4965a0f83765428fabe2 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/6cc980b2b2cd4965a0f83765428fabe2 2024-11-17T01:27:36,595 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/6cc980b2b2cd4965a0f83765428fabe2, entries=150, sequenceid=383, filesize=12.0 K 2024-11-17T01:27:36,595 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=0 B/0 for 271e8f2d6ca47e8f7f50d0cb275120f4 in 882ms, sequenceid=383, compaction requested=false 2024-11-17T01:27:36,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:36,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:36,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-17T01:27:36,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-17T01:27:36,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-17T01:27:36,598 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7420 sec 2024-11-17T01:27:36,599 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 2.7450 sec 2024-11-17T01:27:37,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-17T01:27:37,961 INFO [Thread-732 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 61 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 58 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 62 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 59 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5853 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5795 2024-11-17T01:27:37,962 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-17T01:27:37,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2511 2024-11-17T01:27:37,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7533 rows 2024-11-17T01:27:37,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2511 2024-11-17T01:27:37,963 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7532 rows 2024-11-17T01:27:37,963 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-17T01:27:37,963 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4e560c7b to 127.0.0.1:63898 2024-11-17T01:27:37,963 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:27:37,967 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-17T01:27:37,968 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-17T01:27:37,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:37,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-17T01:27:37,973 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806857972"}]},"ts":"1731806857972"} 2024-11-17T01:27:37,974 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-17T01:27:38,009 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-17T01:27:38,010 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-17T01:27:38,012 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, UNASSIGN}] 2024-11-17T01:27:38,013 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, UNASSIGN 2024-11-17T01:27:38,014 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=271e8f2d6ca47e8f7f50d0cb275120f4, regionState=CLOSING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:38,016 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-17T01:27:38,016 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; CloseRegionProcedure 271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:27:38,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-17T01:27:38,168 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:38,170 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(124): Close 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,170 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-17T01:27:38,170 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1681): Closing 271e8f2d6ca47e8f7f50d0cb275120f4, disabling compactions & flushes 2024-11-17T01:27:38,170 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:38,171 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:38,171 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. after waiting 0 ms 2024-11-17T01:27:38,171 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 
2024-11-17T01:27:38,172 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/99733b78145449a6b5040b029f355184, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1fa5733a71bb4b72a22e1a3b36674de6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/eee0e434926b4c45a1c584a63308f79d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ddbd52825d4d461f95e6e61df73e7235, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ff00ee24257640eba77cc7d577a77871, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1a3e15c7a408475d888ae23cc7761d22, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/6b54b59380d548e3bc7c4e8631f3cfc6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/241c2a2eb51044268764e37954504cc2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7ab26ed32f2045d18f95e44033eaf426, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1c3b39cc81ad463197f0fbad96efb15f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/0c954cf0808b454c9a05bfb578dce361, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8708d720884649afb038d4479780b41c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8c0f7a31d75147b2a2bdef2a2aedaef8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e660d5ddf7ab4747a6d530c7d22a887f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/999f5fd9122f4f3986a95a7dd1752351, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/15972a0c519c4a02b16a2b2b8e1bd2bb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/74e394ab44ee44f098e32111c58f9e43, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/27afb77ad1a145ffb3a989b28ba7805f, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b77d8f7defdd4c39bcadc53df9ce44b0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/61eea49b0aff423298f938e61b8391a7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/fb9713a4d04d421d979b46af4255efd6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/4c8b8fb28a8b4e399e06bb084edf0ad2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7a65ff0f19854eec9f5be66654f1369e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b225e4d89759424db8f25050937c807f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/f261698db32842738db27bf4f28e8dd3] to archive 2024-11-17T01:27:38,176 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T01:27:38,180 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/99733b78145449a6b5040b029f355184 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/99733b78145449a6b5040b029f355184 2024-11-17T01:27:38,182 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1fa5733a71bb4b72a22e1a3b36674de6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1fa5733a71bb4b72a22e1a3b36674de6 2024-11-17T01:27:38,183 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/eee0e434926b4c45a1c584a63308f79d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/eee0e434926b4c45a1c584a63308f79d 2024-11-17T01:27:38,184 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ddbd52825d4d461f95e6e61df73e7235 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ddbd52825d4d461f95e6e61df73e7235 2024-11-17T01:27:38,184 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ff00ee24257640eba77cc7d577a77871 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/ff00ee24257640eba77cc7d577a77871 2024-11-17T01:27:38,185 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1a3e15c7a408475d888ae23cc7761d22 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1a3e15c7a408475d888ae23cc7761d22 2024-11-17T01:27:38,186 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/6b54b59380d548e3bc7c4e8631f3cfc6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/6b54b59380d548e3bc7c4e8631f3cfc6 2024-11-17T01:27:38,187 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/241c2a2eb51044268764e37954504cc2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/241c2a2eb51044268764e37954504cc2 2024-11-17T01:27:38,188 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7ab26ed32f2045d18f95e44033eaf426 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7ab26ed32f2045d18f95e44033eaf426 2024-11-17T01:27:38,189 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1c3b39cc81ad463197f0fbad96efb15f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/1c3b39cc81ad463197f0fbad96efb15f 2024-11-17T01:27:38,189 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/0c954cf0808b454c9a05bfb578dce361 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/0c954cf0808b454c9a05bfb578dce361 2024-11-17T01:27:38,190 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8708d720884649afb038d4479780b41c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8708d720884649afb038d4479780b41c 2024-11-17T01:27:38,191 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8c0f7a31d75147b2a2bdef2a2aedaef8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/8c0f7a31d75147b2a2bdef2a2aedaef8 2024-11-17T01:27:38,192 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e660d5ddf7ab4747a6d530c7d22a887f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e660d5ddf7ab4747a6d530c7d22a887f 2024-11-17T01:27:38,192 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/999f5fd9122f4f3986a95a7dd1752351 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/999f5fd9122f4f3986a95a7dd1752351 2024-11-17T01:27:38,193 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/15972a0c519c4a02b16a2b2b8e1bd2bb to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/15972a0c519c4a02b16a2b2b8e1bd2bb 2024-11-17T01:27:38,194 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/74e394ab44ee44f098e32111c58f9e43 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/74e394ab44ee44f098e32111c58f9e43 2024-11-17T01:27:38,195 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/27afb77ad1a145ffb3a989b28ba7805f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/27afb77ad1a145ffb3a989b28ba7805f 2024-11-17T01:27:38,196 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b77d8f7defdd4c39bcadc53df9ce44b0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b77d8f7defdd4c39bcadc53df9ce44b0 2024-11-17T01:27:38,197 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/61eea49b0aff423298f938e61b8391a7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/61eea49b0aff423298f938e61b8391a7 2024-11-17T01:27:38,197 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/fb9713a4d04d421d979b46af4255efd6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/fb9713a4d04d421d979b46af4255efd6 2024-11-17T01:27:38,198 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/4c8b8fb28a8b4e399e06bb084edf0ad2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/4c8b8fb28a8b4e399e06bb084edf0ad2 2024-11-17T01:27:38,199 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7a65ff0f19854eec9f5be66654f1369e to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/7a65ff0f19854eec9f5be66654f1369e 2024-11-17T01:27:38,200 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b225e4d89759424db8f25050937c807f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b225e4d89759424db8f25050937c807f 2024-11-17T01:27:38,201 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/f261698db32842738db27bf4f28e8dd3 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/f261698db32842738db27bf4f28e8dd3 2024-11-17T01:27:38,202 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/12ea5981e5f04bae9549a3ef384fafe4, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/748c3126945e4bb593d5953a56fe48dc, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5dfaff13bd0f4cd687d7f7d4ddb5dc76, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7c9b7c25ab7946679675ddb1113b7dcd, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/86d15b44e3714f17a120ccd1dd82fc7e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6a3b0ad7b481497e985f6d4a5a7f85b2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/69c9effb15cd48dd9ce39599cde8f291, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/4be8892833fc4f47bfd85e8318aa3907, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7b025d48498147c6811556e90615de21, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/8d9a433bd04041bfbac12e4fdda7606e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/66191ba920314a018e1313e26cc9cff4, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/3f5951a57a394ff1a843764aa23dfe51, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/dd8e2109b72a4bef991d4f7edb2278f9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/e3ca5e7ed64943848cd5af25ed12ee90, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5298d74de42f472cb7c29c8ce4a1a1cc, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/18cac9ac289c47658a30b9cc45b5ede6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/049da9e9548c4c8280eb06b903eee690, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a752c8738f454d4e8017d223e14b0985, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a833bb5275bb4b80bfac79c60c8299fd, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/58aa9538bb6f4ae1bf93f438b9949b7c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/25a3b931c218467dafc2f7c9f1efacee, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6091ad0e7f894a599b012982fea26d18, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/1258a8c94c3d47ee8a015ff1d88872a9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/0c2a31970fe14890af0b2acac38187e9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a0cbb70f11a84ee7a04e0fa657fa22de] to archive 2024-11-17T01:27:38,203 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:27:38,204 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/12ea5981e5f04bae9549a3ef384fafe4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/12ea5981e5f04bae9549a3ef384fafe4 2024-11-17T01:27:38,205 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/748c3126945e4bb593d5953a56fe48dc to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/748c3126945e4bb593d5953a56fe48dc 2024-11-17T01:27:38,206 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5dfaff13bd0f4cd687d7f7d4ddb5dc76 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5dfaff13bd0f4cd687d7f7d4ddb5dc76 2024-11-17T01:27:38,206 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7c9b7c25ab7946679675ddb1113b7dcd to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7c9b7c25ab7946679675ddb1113b7dcd 2024-11-17T01:27:38,207 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/86d15b44e3714f17a120ccd1dd82fc7e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/86d15b44e3714f17a120ccd1dd82fc7e 2024-11-17T01:27:38,208 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6a3b0ad7b481497e985f6d4a5a7f85b2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6a3b0ad7b481497e985f6d4a5a7f85b2 2024-11-17T01:27:38,209 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/69c9effb15cd48dd9ce39599cde8f291 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/69c9effb15cd48dd9ce39599cde8f291 2024-11-17T01:27:38,210 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/4be8892833fc4f47bfd85e8318aa3907 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/4be8892833fc4f47bfd85e8318aa3907 2024-11-17T01:27:38,210 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7b025d48498147c6811556e90615de21 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/7b025d48498147c6811556e90615de21 2024-11-17T01:27:38,211 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/8d9a433bd04041bfbac12e4fdda7606e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/8d9a433bd04041bfbac12e4fdda7606e 2024-11-17T01:27:38,212 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/66191ba920314a018e1313e26cc9cff4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/66191ba920314a018e1313e26cc9cff4 2024-11-17T01:27:38,213 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/3f5951a57a394ff1a843764aa23dfe51 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/3f5951a57a394ff1a843764aa23dfe51 2024-11-17T01:27:38,213 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/dd8e2109b72a4bef991d4f7edb2278f9 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/dd8e2109b72a4bef991d4f7edb2278f9 2024-11-17T01:27:38,214 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/e3ca5e7ed64943848cd5af25ed12ee90 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/e3ca5e7ed64943848cd5af25ed12ee90 2024-11-17T01:27:38,215 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5298d74de42f472cb7c29c8ce4a1a1cc to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/5298d74de42f472cb7c29c8ce4a1a1cc 2024-11-17T01:27:38,216 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/18cac9ac289c47658a30b9cc45b5ede6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/18cac9ac289c47658a30b9cc45b5ede6 2024-11-17T01:27:38,216 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/049da9e9548c4c8280eb06b903eee690 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/049da9e9548c4c8280eb06b903eee690 2024-11-17T01:27:38,217 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a752c8738f454d4e8017d223e14b0985 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a752c8738f454d4e8017d223e14b0985 2024-11-17T01:27:38,218 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a833bb5275bb4b80bfac79c60c8299fd to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a833bb5275bb4b80bfac79c60c8299fd 2024-11-17T01:27:38,219 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/58aa9538bb6f4ae1bf93f438b9949b7c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/58aa9538bb6f4ae1bf93f438b9949b7c 2024-11-17T01:27:38,219 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/25a3b931c218467dafc2f7c9f1efacee to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/25a3b931c218467dafc2f7c9f1efacee 2024-11-17T01:27:38,220 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6091ad0e7f894a599b012982fea26d18 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/6091ad0e7f894a599b012982fea26d18 2024-11-17T01:27:38,221 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/1258a8c94c3d47ee8a015ff1d88872a9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/1258a8c94c3d47ee8a015ff1d88872a9 2024-11-17T01:27:38,222 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/0c2a31970fe14890af0b2acac38187e9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/0c2a31970fe14890af0b2acac38187e9 2024-11-17T01:27:38,222 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a0cbb70f11a84ee7a04e0fa657fa22de to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/a0cbb70f11a84ee7a04e0fa657fa22de 2024-11-17T01:27:38,224 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/72d019905ed541f8b5eb32b987366ac5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/c9725dfa1c2d4057b7d0a2e474167fde, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f769181ed1c04568a9bccd320c2b7d1d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/a4e9c49da4c8441c8e4a6ffbac55dbd1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/7069ea3642244a4399b940b2ec422540, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/14226bbe95384a0992dba298214b6d67, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffe3facf182441b796eb3149d83e9f30, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/18eeca80d08a45b4a7b320c17f510410, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/37f53171996e45019f8fa5173fc6f9aa, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/2dad7c71ce23414a88829726661cd87e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/40b9a339eb9e4322915b4d6af509ff60, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8b52b8b1d370467082e2ce6ce312d18b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/e0694e5e28ad48d8be05b045cc1b4fd3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f0ee315619794b5695d6ddfb86fecc70, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/851f4292866543d19a1b710eee45f511, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/abfda3fa50a44587bd0c0735bb932929, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/fd42316f68c844cca66e19441a300e5f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/48a3d2e8e3ad483b8a57b892768070de, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/cbb1cb4be2434d368fa8cc658a65cd07, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/dc895e4e096e46a1a250e3af789076d2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8fd9e06abaad4a39be624fae1d2d01c9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/319990cbe6244043b6bf47f8ca028afa, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/643a6f39c6da4ecca13d75a632df0dad, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/55940190ae0142eba7847d8c40e2e61c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffa28bd3041145c282f7acb83fc3fe5b] to archive 2024-11-17T01:27:38,224 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T01:27:38,226 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/72d019905ed541f8b5eb32b987366ac5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/72d019905ed541f8b5eb32b987366ac5 2024-11-17T01:27:38,227 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/c9725dfa1c2d4057b7d0a2e474167fde to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/c9725dfa1c2d4057b7d0a2e474167fde 2024-11-17T01:27:38,228 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f769181ed1c04568a9bccd320c2b7d1d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f769181ed1c04568a9bccd320c2b7d1d 2024-11-17T01:27:38,229 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/a4e9c49da4c8441c8e4a6ffbac55dbd1 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/a4e9c49da4c8441c8e4a6ffbac55dbd1 2024-11-17T01:27:38,230 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/7069ea3642244a4399b940b2ec422540 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/7069ea3642244a4399b940b2ec422540 2024-11-17T01:27:38,230 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/14226bbe95384a0992dba298214b6d67 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/14226bbe95384a0992dba298214b6d67 2024-11-17T01:27:38,231 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffe3facf182441b796eb3149d83e9f30 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffe3facf182441b796eb3149d83e9f30 2024-11-17T01:27:38,232 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/18eeca80d08a45b4a7b320c17f510410 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/18eeca80d08a45b4a7b320c17f510410 2024-11-17T01:27:38,233 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/37f53171996e45019f8fa5173fc6f9aa to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/37f53171996e45019f8fa5173fc6f9aa 2024-11-17T01:27:38,234 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/2dad7c71ce23414a88829726661cd87e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/2dad7c71ce23414a88829726661cd87e 2024-11-17T01:27:38,235 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/40b9a339eb9e4322915b4d6af509ff60 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/40b9a339eb9e4322915b4d6af509ff60 2024-11-17T01:27:38,236 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8b52b8b1d370467082e2ce6ce312d18b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8b52b8b1d370467082e2ce6ce312d18b 2024-11-17T01:27:38,237 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/e0694e5e28ad48d8be05b045cc1b4fd3 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/e0694e5e28ad48d8be05b045cc1b4fd3 2024-11-17T01:27:38,238 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f0ee315619794b5695d6ddfb86fecc70 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/f0ee315619794b5695d6ddfb86fecc70 2024-11-17T01:27:38,239 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/851f4292866543d19a1b710eee45f511 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/851f4292866543d19a1b710eee45f511 2024-11-17T01:27:38,240 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/abfda3fa50a44587bd0c0735bb932929 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/abfda3fa50a44587bd0c0735bb932929 2024-11-17T01:27:38,240 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/fd42316f68c844cca66e19441a300e5f to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/fd42316f68c844cca66e19441a300e5f 2024-11-17T01:27:38,241 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/48a3d2e8e3ad483b8a57b892768070de to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/48a3d2e8e3ad483b8a57b892768070de 2024-11-17T01:27:38,242 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/cbb1cb4be2434d368fa8cc658a65cd07 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/cbb1cb4be2434d368fa8cc658a65cd07 2024-11-17T01:27:38,243 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/dc895e4e096e46a1a250e3af789076d2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/dc895e4e096e46a1a250e3af789076d2 2024-11-17T01:27:38,244 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8fd9e06abaad4a39be624fae1d2d01c9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/8fd9e06abaad4a39be624fae1d2d01c9 2024-11-17T01:27:38,245 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/319990cbe6244043b6bf47f8ca028afa to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/319990cbe6244043b6bf47f8ca028afa 2024-11-17T01:27:38,246 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/643a6f39c6da4ecca13d75a632df0dad to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/643a6f39c6da4ecca13d75a632df0dad 2024-11-17T01:27:38,247 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/55940190ae0142eba7847d8c40e2e61c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/55940190ae0142eba7847d8c40e2e61c 2024-11-17T01:27:38,248 DEBUG [StoreCloser-TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffa28bd3041145c282f7acb83fc3fe5b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/ffa28bd3041145c282f7acb83fc3fe5b 2024-11-17T01:27:38,252 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/recovered.edits/389.seqid, newMaxSeqId=389, maxSeqId=4 2024-11-17T01:27:38,253 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4. 2024-11-17T01:27:38,253 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1635): Region close journal for 271e8f2d6ca47e8f7f50d0cb275120f4: 2024-11-17T01:27:38,254 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(170): Closed 271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,255 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=271e8f2d6ca47e8f7f50d0cb275120f4, regionState=CLOSED 2024-11-17T01:27:38,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-17T01:27:38,257 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseRegionProcedure 271e8f2d6ca47e8f7f50d0cb275120f4, server=04f7e7347dc7,37721,1731806791503 in 240 msec 2024-11-17T01:27:38,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=63, resume processing ppid=62 2024-11-17T01:27:38,258 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, ppid=62, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=271e8f2d6ca47e8f7f50d0cb275120f4, UNASSIGN in 245 msec 2024-11-17T01:27:38,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-17T01:27:38,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 248 msec 2024-11-17T01:27:38,260 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806858260"}]},"ts":"1731806858260"} 2024-11-17T01:27:38,260 INFO [PEWorker-3 
{}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-17T01:27:38,267 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-17T01:27:38,269 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 300 msec 2024-11-17T01:27:38,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-17T01:27:38,274 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-17T01:27:38,275 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-17T01:27:38,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:38,276 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:38,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-17T01:27:38,276 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=65, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:38,278 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,280 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/recovered.edits] 2024-11-17T01:27:38,283 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b09473feedb8478bbf13e24c00042eb2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/b09473feedb8478bbf13e24c00042eb2 2024-11-17T01:27:38,284 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e63677bc08384bfd90e05750efc71477 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/A/e63677bc08384bfd90e05750efc71477 2024-11-17T01:27:38,286 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/71670a00248441ab85ebb0ff9ed443be to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/71670a00248441ab85ebb0ff9ed443be 2024-11-17T01:27:38,287 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/b4e6ff61ce68412e88fa633f5fd4b2fa to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/B/b4e6ff61ce68412e88fa633f5fd4b2fa 2024-11-17T01:27:38,289 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/6cc980b2b2cd4965a0f83765428fabe2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/6cc980b2b2cd4965a0f83765428fabe2 2024-11-17T01:27:38,290 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/6e32a8918bb441b6863b96aa641ae229 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/C/6e32a8918bb441b6863b96aa641ae229 2024-11-17T01:27:38,292 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/recovered.edits/389.seqid to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4/recovered.edits/389.seqid 2024-11-17T01:27:38,292 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,292 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-17T01:27:38,293 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-17T01:27:38,293 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-17T01:27:38,296 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111708eca7f8ade84acf82d9fc946b8f2050_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111708eca7f8ade84acf82d9fc946b8f2050_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,297 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117131d4c7c034d45a48fcbbd7eb196f5b8_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117131d4c7c034d45a48fcbbd7eb196f5b8_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,298 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117229565f3aa5849b0b86e28dd6a8ba28b_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117229565f3aa5849b0b86e28dd6a8ba28b_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,299 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111722b8d707850b4c8ca57bdf2650764306_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111722b8d707850b4c8ca57bdf2650764306_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,300 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111725ae920c2a1a469e950cf80cc3453ffd_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111725ae920c2a1a469e950cf80cc3453ffd_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,300 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172bec423d3e684a71832ff427668315b1_271e8f2d6ca47e8f7f50d0cb275120f4 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172bec423d3e684a71832ff427668315b1_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,301 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117437e874a709f4ad3b0250ef58505861f_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117437e874a709f4ad3b0250ef58505861f_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,302 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411175bef286249174c68aa25c1df4a6a8f35_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411175bef286249174c68aa25c1df4a6a8f35_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,303 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117637583c1ccf74682a230a6a39f880841_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117637583c1ccf74682a230a6a39f880841_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,304 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411178375868b46c343f48d0abde3c20908ca_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411178375868b46c343f48d0abde3c20908ca_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,305 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117994da934157a4614acf010097cd2de88_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117994da934157a4614acf010097cd2de88_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,305 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117caca58770bc448788d3ddabd868488ac_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117caca58770bc448788d3ddabd868488ac_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,306 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117cc01c9f9c5e840cc9d4058440cb25cac_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117cc01c9f9c5e840cc9d4058440cb25cac_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,307 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117cca251fcd7b04ed0a9af840f8bea4bc3_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117cca251fcd7b04ed0a9af840f8bea4bc3_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,308 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117deed8a201479472292b34c982fbee194_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117deed8a201479472292b34c982fbee194_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,309 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f35a5725d3be4b83b7eb23f1308ce6e2_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f35a5725d3be4b83b7eb23f1308ce6e2_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,310 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f3694aafd4cf45e19fa0b417ff728e11_271e8f2d6ca47e8f7f50d0cb275120f4 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f3694aafd4cf45e19fa0b417ff728e11_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,311 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f37c3b649d0b4ef1849c61b54eae2e62_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f37c3b649d0b4ef1849c61b54eae2e62_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,312 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f43ff9dbdcb942b6a509168dce1c7a4c_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f43ff9dbdcb942b6a509168dce1c7a4c_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,313 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f4e06c7a8acb4a0ea1f646200eedd570_271e8f2d6ca47e8f7f50d0cb275120f4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f4e06c7a8acb4a0ea1f646200eedd570_271e8f2d6ca47e8f7f50d0cb275120f4 2024-11-17T01:27:38,313 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-17T01:27:38,315 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=65, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:38,317 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-17T01:27:38,319 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-17T01:27:38,320 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=65, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:38,320 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
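The disable that completed above (procId 61) and the delete in flight here (procId 65) are both driven from the test client through the synchronous Admin API. A minimal client-side sketch of that call sequence, assuming an hbase-site.xml pointing at the test cluster is on the classpath; the class name is illustrative, only the table name comes from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropAcidTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                if (admin.tableExists(table)) {
                    // Submits a DisableTableProcedure on the master and waits for it,
                    // matching the "Operation: DISABLE ... procId: 61 completed" entry above.
                    admin.disableTable(table);
                    // Submits a DeleteTableProcedure, which archives the region and MOB store
                    // files and removes the table from hbase:meta, as traced in these entries.
                    admin.deleteTable(table);
                }
            }
        }
    }

Both calls block until the corresponding master procedure finishes, which is why each client-side "completed" line appears only after the procedure executor has already reported SUCCESS for that pid.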
2024-11-17T01:27:38,320 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731806858320"}]},"ts":"9223372036854775807"} 2024-11-17T01:27:38,322 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-17T01:27:38,322 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 271e8f2d6ca47e8f7f50d0cb275120f4, NAME => 'TestAcidGuarantees,,1731806830650.271e8f2d6ca47e8f7f50d0cb275120f4.', STARTKEY => '', ENDKEY => ''}] 2024-11-17T01:27:38,322 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-17T01:27:38,322 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731806858322"}]},"ts":"9223372036854775807"} 2024-11-17T01:27:38,324 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-17T01:27:38,335 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=65, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:38,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 60 msec 2024-11-17T01:27:38,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-17T01:27:38,378 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-17T01:27:38,394 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=241 (was 239) Potentially hanging thread: region-location-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1280237433_22 at /127.0.0.1:34212 [Waiting for operation #821] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1280237433_22 at /127.0.0.1:43154 [Waiting for operation #1019] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_137525728_22 at /127.0.0.1:43236 [Waiting for operation #990] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_137525728_22 at /127.0.0.1:34204 [Waiting for operation #839] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x526f2908-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=463 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=313 (was 282) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3916 (was 4053) 2024-11-17T01:27:38,403 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=241, OpenFileDescriptor=463, MaxFileDescriptor=1048576, SystemLoadAverage=313, ProcessCount=11, AvailableMemoryMB=3915 2024-11-17T01:27:38,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-17T01:27:38,404 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:27:38,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-17T01:27:38,406 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T01:27:38,406 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:38,406 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 66 2024-11-17T01:27:38,407 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T01:27:38,407 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-17T01:27:38,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742082_1258 (size=960) 2024-11-17T01:27:38,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-17T01:27:38,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-17T01:27:38,819 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 2024-11-17T01:27:38,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742083_1259 (size=53) 2024-11-17T01:27:39,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-17T01:27:39,231 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:27:39,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 8d60f9787865bae9bdb70c9c59e45f35, disabling compactions & flushes 2024-11-17T01:27:39,232 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:39,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
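The table descriptor logged for procId 66 (families A, B and C with VERSIONS => '1', plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC') can be reproduced with the TableDescriptorBuilder API. A sketch of what the test's create request roughly amounts to; the class and method names are illustrative, only the table name, family names and attribute values are taken from the log:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTable {
        static void create(Admin admin) throws IOException {
            TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // table METADATA attribute shown in the logged descriptor
                    .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
            for (String family : new String[] {"A", "B", "C"}) {
                builder.setColumnFamily(
                    ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)        // VERSIONS => '1'
                        .setBlocksize(64 * 1024)  // BLOCKSIZE => '65536'
                        .build());
            }
            // Stores a CreateTableProcedure on the master (pid=66 above) and waits for it.
            admin.createTable(builder.build());
        }
    }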
2024-11-17T01:27:39,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. after waiting 0 ms 2024-11-17T01:27:39,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:39,232 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:39,232 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:39,260 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T01:27:39,261 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731806859260"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731806859260"}]},"ts":"1731806859260"} 2024-11-17T01:27:39,262 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-17T01:27:39,264 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T01:27:39,264 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806859264"}]},"ts":"1731806859264"} 2024-11-17T01:27:39,266 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-17T01:27:39,301 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8d60f9787865bae9bdb70c9c59e45f35, ASSIGN}] 2024-11-17T01:27:39,302 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8d60f9787865bae9bdb70c9c59e45f35, ASSIGN 2024-11-17T01:27:39,303 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=8d60f9787865bae9bdb70c9c59e45f35, ASSIGN; state=OFFLINE, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=false 2024-11-17T01:27:39,454 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=8d60f9787865bae9bdb70c9c59e45f35, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:39,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; OpenRegionProcedure 8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:27:39,515 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-17T01:27:39,610 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:39,619 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:39,619 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7285): Opening region: {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:27:39,619 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:39,619 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:27:39,620 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7327): checking encryption for 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:39,620 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7330): checking classloading for 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:39,621 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:39,622 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:27:39,622 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d60f9787865bae9bdb70c9c59e45f35 columnFamilyName A 2024-11-17T01:27:39,622 DEBUG [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:39,623 INFO 
[StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] regionserver.HStore(327): Store=8d60f9787865bae9bdb70c9c59e45f35/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:27:39,623 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:39,624 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:27:39,624 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d60f9787865bae9bdb70c9c59e45f35 columnFamilyName B 2024-11-17T01:27:39,624 DEBUG [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:39,624 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] regionserver.HStore(327): Store=8d60f9787865bae9bdb70c9c59e45f35/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:27:39,625 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:39,625 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:27:39,626 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8d60f9787865bae9bdb70c9c59e45f35 columnFamilyName C 2024-11-17T01:27:39,626 DEBUG [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:27:39,626 INFO [StoreOpener-8d60f9787865bae9bdb70c9c59e45f35-1 {}] regionserver.HStore(327): Store=8d60f9787865bae9bdb70c9c59e45f35/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:27:39,626 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:39,627 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:39,627 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:39,629 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-17T01:27:39,630 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1085): writing seq id for 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:39,632 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T01:27:39,633 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1102): Opened 8d60f9787865bae9bdb70c9c59e45f35; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68097778, jitterRate=0.014735966920852661}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:27:39,634 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1001): Region open journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:39,634 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., pid=68, masterSystemTime=1731806859610 2024-11-17T01:27:39,635 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:39,635 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
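The records above show the master's CreateTableProcedure (pid=66) assigning and opening region 8d60f9787865bae9bdb70c9c59e45f35 with column families A, B and C, and FlushLargeStoresPolicy falling back to a default because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. A minimal client-side sketch of creating an equivalent table with the standard HBase Admin API follows; the connection setup, the 8 MB lower-bound value and the class name are illustrative assumptions, not taken from this test run.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public final class CreateTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    // Uses whichever hbase-site.xml is on the classpath (assumption).
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // The three column families opened for region 8d60f9787865bae9bdb70c9c59e45f35 above.
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(ColumnFamilyDescriptorBuilder.of(family));
      }
      // Optional: set the per-family flush lower bound the log reports as unset
      // (8 MB is an illustrative value, not taken from the test).
      table.setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
          String.valueOf(8L * 1024 * 1024));
      // Blocks until the master's CreateTableProcedure completes,
      // analogous to procId 66 in the records above.
      admin.createTable(table.build());
    }
  }
}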
2024-11-17T01:27:39,636 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=8d60f9787865bae9bdb70c9c59e45f35, regionState=OPEN, openSeqNum=2, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:39,637 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-17T01:27:39,638 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; OpenRegionProcedure 8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 in 181 msec 2024-11-17T01:27:39,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-11-17T01:27:39,639 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8d60f9787865bae9bdb70c9c59e45f35, ASSIGN in 337 msec 2024-11-17T01:27:39,639 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T01:27:39,640 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806859639"}]},"ts":"1731806859639"} 2024-11-17T01:27:39,640 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-17T01:27:39,651 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T01:27:39,652 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2470 sec 2024-11-17T01:27:40,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-17T01:27:40,518 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 66 completed 2024-11-17T01:27:40,524 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53bfce45 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@64dc42d9 2024-11-17T01:27:40,568 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58341641, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,570 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,572 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,574 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T01:27:40,576 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41854, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T01:27:40,580 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a569490 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c1ac389 2024-11-17T01:27:40,593 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44645c55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,595 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6862e3ce to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@28e73c0 2024-11-17T01:27:40,608 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ee0130, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,611 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d296fed to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7c480dfb 2024-11-17T01:27:40,618 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683b64c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,621 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x08d0caa5 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34cb3991 2024-11-17T01:27:40,633 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e55eb7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,635 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x43f04e0e to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e9ae050 2024-11-17T01:27:40,643 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a703d2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,646 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2fef31f8 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@14ed1e44 2024-11-17T01:27:40,658 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b04266, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,660 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0eb04aeb to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72537a47 2024-11-17T01:27:40,669 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@88aa519, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,671 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6a0e9c8f to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36642cb 2024-11-17T01:27:40,683 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e998dd3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,685 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d68f787 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c299cfb 2024-11-17T01:27:40,693 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e4c79b8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,695 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x10e6bf6a to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@605827c9 2024-11-17T01:27:40,702 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d1403c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:27:40,707 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-17T01:27:40,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-17T01:27:40,708 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:40,709 DEBUG [hconnection-0x606487b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,709 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:40,709 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:40,710 DEBUG [hconnection-0x7cebf29b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,710 DEBUG [hconnection-0x45f1c47d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,711 DEBUG [hconnection-0x62ecae02-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,711 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,711 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,711 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46400, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,711 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46408, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,712 DEBUG [hconnection-0x587bab53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,715 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46422, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,715 DEBUG [hconnection-0x7caf21b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,715 DEBUG [hconnection-0x7496089a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,716 DEBUG [hconnection-0x43b46f20-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,716 DEBUG [hconnection-0x7605748a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:27:40,716 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,717 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46454, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,717 DEBUG [hconnection-0x80fa607-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
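The flush request logged at 01:27:40,707 ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees") is what stores FlushTableProcedure pid=69 and its FlushRegionProcedure subprocedure pid=70. A minimal sketch of issuing the same request from a client is below; the connection setup and class name are illustrative assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class FlushTestAcidGuaranteesTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Admin.flush(TableName) sends the master RPC logged above as
      // "flush TestAcidGuarantees" and drives FlushTableProcedure pid=69.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}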
2024-11-17T01:27:40,717 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46456, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,718 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46440, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,719 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46472, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:27:40,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:40,723 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:27:40,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:40,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:40,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:40,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:40,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:40,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:40,748 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/281f6e5fd4b849a985685cd3254689c1 is 50, key is test_row_0/A:col10/1731806860720/Put/seqid=0 2024-11-17T01:27:40,748 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806920745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806920746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806920746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806920747, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806920748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742084_1260 (size=12001) 2024-11-17T01:27:40,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/281f6e5fd4b849a985685cd3254689c1 2024-11-17T01:27:40,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-17T01:27:40,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/c81835745daa4d839ad7947450c0579c is 50, key is test_row_0/B:col10/1731806860720/Put/seqid=0 2024-11-17T01:27:40,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742085_1261 (size=12001) 2024-11-17T01:27:40,829 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/c81835745daa4d839ad7947450c0579c 2024-11-17T01:27:40,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806920850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806920851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806920851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806920852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:40,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806920852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,857 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/60d53496adc041fba3ac4266c20a4f8f is 50, key is test_row_0/C:col10/1731806860720/Put/seqid=0 2024-11-17T01:27:40,862 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:40,862 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-17T01:27:40,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:40,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:40,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:40,862 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:40,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:40,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:40,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742086_1262 (size=12001) 2024-11-17T01:27:41,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-17T01:27:41,015 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,015 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-17T01:27:41,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:41,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:41,015 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:41,015 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:41,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:41,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:41,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806921052, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806921055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806921055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806921056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806921056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,167 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,167 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-17T01:27:41,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:41,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:41,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:41,168 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:41,168 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:41,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:41,264 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/60d53496adc041fba3ac4266c20a4f8f 2024-11-17T01:27:41,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/281f6e5fd4b849a985685cd3254689c1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/281f6e5fd4b849a985685cd3254689c1 2024-11-17T01:27:41,272 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/281f6e5fd4b849a985685cd3254689c1, entries=150, sequenceid=13, filesize=11.7 K 2024-11-17T01:27:41,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/c81835745daa4d839ad7947450c0579c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c81835745daa4d839ad7947450c0579c 2024-11-17T01:27:41,277 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c81835745daa4d839ad7947450c0579c, entries=150, sequenceid=13, filesize=11.7 K 2024-11-17T01:27:41,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/60d53496adc041fba3ac4266c20a4f8f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/60d53496adc041fba3ac4266c20a4f8f 2024-11-17T01:27:41,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/60d53496adc041fba3ac4266c20a4f8f, entries=150, sequenceid=13, filesize=11.7 K 2024-11-17T01:27:41,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 8d60f9787865bae9bdb70c9c59e45f35 in 558ms, sequenceid=13, compaction requested=false 2024-11-17T01:27:41,282 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:41,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=69 2024-11-17T01:27:41,319 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,320 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-17T01:27:41,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:41,320 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-17T01:27:41,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:41,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:41,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:41,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:41,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:41,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:41,325 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/3bbe8998f2e74cf98e940f9c1025b1b0 is 50, key is test_row_0/A:col10/1731806860743/Put/seqid=0 2024-11-17T01:27:41,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742087_1263 (size=12001) 2024-11-17T01:27:41,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:41,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:41,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806921362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806921363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806921364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806921364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806921365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806921466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,467 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806921466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806921467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806921467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806921467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806921668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,669 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806921669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806921670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806921670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,671 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806921670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,730 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/3bbe8998f2e74cf98e940f9c1025b1b0 2024-11-17T01:27:41,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/f131c4d669b344f8b0405745a113fba6 is 50, key is test_row_0/B:col10/1731806860743/Put/seqid=0 2024-11-17T01:27:41,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742088_1264 (size=12001) 2024-11-17T01:27:41,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-17T01:27:41,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806921970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,973 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806921971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806921972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806921973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:41,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:41,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806921974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:42,141 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/f131c4d669b344f8b0405745a113fba6 2024-11-17T01:27:42,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/6d046b90d15f4af59b6549dcd7006129 is 50, key is test_row_0/C:col10/1731806860743/Put/seqid=0 2024-11-17T01:27:42,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742089_1265 (size=12001) 2024-11-17T01:27:42,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806922474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:42,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806922474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:42,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806922475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:42,476 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:42,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806922475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:42,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:42,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806922476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:42,551 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/6d046b90d15f4af59b6549dcd7006129 2024-11-17T01:27:42,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/3bbe8998f2e74cf98e940f9c1025b1b0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/3bbe8998f2e74cf98e940f9c1025b1b0 2024-11-17T01:27:42,558 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/3bbe8998f2e74cf98e940f9c1025b1b0, entries=150, sequenceid=37, filesize=11.7 K 2024-11-17T01:27:42,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/f131c4d669b344f8b0405745a113fba6 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f131c4d669b344f8b0405745a113fba6 2024-11-17T01:27:42,562 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f131c4d669b344f8b0405745a113fba6, entries=150, sequenceid=37, filesize=11.7 K 2024-11-17T01:27:42,563 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/6d046b90d15f4af59b6549dcd7006129 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6d046b90d15f4af59b6549dcd7006129 2024-11-17T01:27:42,567 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6d046b90d15f4af59b6549dcd7006129, entries=150, sequenceid=37, filesize=11.7 K 2024-11-17T01:27:42,568 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 8d60f9787865bae9bdb70c9c59e45f35 in 1248ms, sequenceid=37, compaction requested=false 2024-11-17T01:27:42,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:42,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:42,568 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-17T01:27:42,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-17T01:27:42,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-17T01:27:42,570 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8600 sec 2024-11-17T01:27:42,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 1.8630 sec 2024-11-17T01:27:42,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-17T01:27:42,812 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-17T01:27:42,813 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:42,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-17T01:27:42,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-17T01:27:42,814 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:42,815 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:42,815 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:42,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-17T01:27:42,966 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:42,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-17T01:27:42,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:42,967 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-17T01:27:42,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:42,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:42,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:42,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:42,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:42,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:42,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/a0efb2fe6fa7474c94ace387042ea164 is 50, key is test_row_0/A:col10/1731806861364/Put/seqid=0 2024-11-17T01:27:42,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742090_1266 (size=12001) 2024-11-17T01:27:43,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-17T01:27:43,376 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/a0efb2fe6fa7474c94ace387042ea164 2024-11-17T01:27:43,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/ce3155069aab490f87f0bf340ef1987f is 50, key is test_row_0/B:col10/1731806861364/Put/seqid=0 2024-11-17T01:27:43,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742091_1267 (size=12001) 2024-11-17T01:27:43,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-17T01:27:43,478 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:43,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:43,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806923494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,497 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806923495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806923496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806923496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,499 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806923497, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806923597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806923597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806923599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806923599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,601 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806923600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,781 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-17T01:27:43,786 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/ce3155069aab490f87f0bf340ef1987f 2024-11-17T01:27:43,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/4069a118158241e980b93a84857ff005 is 50, key is test_row_0/C:col10/1731806861364/Put/seqid=0 2024-11-17T01:27:43,802 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806923800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806923802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806923802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,806 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806923804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:43,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806923804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:43,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742092_1268 (size=12001) 2024-11-17T01:27:43,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-17T01:27:44,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806924106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806924106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806924106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806924107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806924109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,210 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=49 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/4069a118158241e980b93a84857ff005 2024-11-17T01:27:44,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/a0efb2fe6fa7474c94ace387042ea164 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/a0efb2fe6fa7474c94ace387042ea164 2024-11-17T01:27:44,218 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/a0efb2fe6fa7474c94ace387042ea164, entries=150, sequenceid=49, filesize=11.7 K 2024-11-17T01:27:44,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/ce3155069aab490f87f0bf340ef1987f as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/ce3155069aab490f87f0bf340ef1987f 2024-11-17T01:27:44,222 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/ce3155069aab490f87f0bf340ef1987f, entries=150, sequenceid=49, filesize=11.7 K 2024-11-17T01:27:44,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/4069a118158241e980b93a84857ff005 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/4069a118158241e980b93a84857ff005 2024-11-17T01:27:44,226 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/4069a118158241e980b93a84857ff005, entries=150, sequenceid=49, filesize=11.7 K 2024-11-17T01:27:44,227 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 8d60f9787865bae9bdb70c9c59e45f35 in 1260ms, sequenceid=49, compaction requested=true 2024-11-17T01:27:44,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:44,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:44,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-17T01:27:44,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-17T01:27:44,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-17T01:27:44,229 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4130 sec 2024-11-17T01:27:44,232 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.4160 sec 2024-11-17T01:27:44,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:44,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-17T01:27:44,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:44,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:44,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:44,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:44,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:44,611 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:44,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/2fc6684c8f9c4d768605cea6449db4ad is 50, key is test_row_0/A:col10/1731806864609/Put/seqid=0 2024-11-17T01:27:44,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742093_1269 (size=14341) 2024-11-17T01:27:44,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806924615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806924617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806924618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806924619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806924618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/2fc6684c8f9c4d768605cea6449db4ad 2024-11-17T01:27:44,634 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/73e44cbc129a4c9f84ae8c5a6c32da7c is 50, key is test_row_0/B:col10/1731806864609/Put/seqid=0 2024-11-17T01:27:44,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742094_1270 (size=12001) 2024-11-17T01:27:44,723 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806924720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806924723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806924723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806924723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-17T01:27:44,918 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-17T01:27:44,919 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:44,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-17T01:27:44,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-17T01:27:44,920 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:44,920 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:44,921 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:44,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806924925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806924926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806924927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:44,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:44,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806924927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-17T01:27:45,039 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/73e44cbc129a4c9f84ae8c5a6c32da7c 2024-11-17T01:27:45,046 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/c46844b63595425c80404f8b868ab4d6 is 50, key is test_row_0/C:col10/1731806864609/Put/seqid=0 2024-11-17T01:27:45,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742095_1271 (size=12001) 2024-11-17T01:27:45,072 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,072 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-17T01:27:45,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:45,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:45,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:45,073 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:45,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:45,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:45,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-17T01:27:45,224 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,225 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-17T01:27:45,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:45,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:45,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:45,225 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:45,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:45,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:45,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806925230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806925230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806925231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,233 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806925231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,377 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,377 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-17T01:27:45,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:45,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:45,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:45,378 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:45,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:45,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:45,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/c46844b63595425c80404f8b868ab4d6 2024-11-17T01:27:45,459 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/2fc6684c8f9c4d768605cea6449db4ad as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2fc6684c8f9c4d768605cea6449db4ad 2024-11-17T01:27:45,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2fc6684c8f9c4d768605cea6449db4ad, entries=200, sequenceid=75, filesize=14.0 K 2024-11-17T01:27:45,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/73e44cbc129a4c9f84ae8c5a6c32da7c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/73e44cbc129a4c9f84ae8c5a6c32da7c 2024-11-17T01:27:45,467 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/73e44cbc129a4c9f84ae8c5a6c32da7c, entries=150, sequenceid=75, filesize=11.7 K 2024-11-17T01:27:45,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/c46844b63595425c80404f8b868ab4d6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/c46844b63595425c80404f8b868ab4d6 2024-11-17T01:27:45,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/c46844b63595425c80404f8b868ab4d6, entries=150, sequenceid=75, filesize=11.7 K 2024-11-17T01:27:45,472 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 8d60f9787865bae9bdb70c9c59e45f35 in 861ms, sequenceid=75, compaction requested=true 2024-11-17T01:27:45,472 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:45,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:A, 
priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:45,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:45,472 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:45,472 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:45,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:45,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:45,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:45,472 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:45,473 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50344 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:45,473 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:45,473 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/A is initiating minor compaction (all files) 2024-11-17T01:27:45,473 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/B is initiating minor compaction (all files) 2024-11-17T01:27:45,473 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/A in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:45,473 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/B in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:45,474 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/281f6e5fd4b849a985685cd3254689c1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/3bbe8998f2e74cf98e940f9c1025b1b0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/a0efb2fe6fa7474c94ace387042ea164, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2fc6684c8f9c4d768605cea6449db4ad] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=49.2 K 2024-11-17T01:27:45,474 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c81835745daa4d839ad7947450c0579c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f131c4d669b344f8b0405745a113fba6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/ce3155069aab490f87f0bf340ef1987f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/73e44cbc129a4c9f84ae8c5a6c32da7c] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=46.9 K 2024-11-17T01:27:45,474 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 281f6e5fd4b849a985685cd3254689c1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731806860720 2024-11-17T01:27:45,474 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c81835745daa4d839ad7947450c0579c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731806860720 2024-11-17T01:27:45,474 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f131c4d669b344f8b0405745a113fba6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731806860743 2024-11-17T01:27:45,474 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3bbe8998f2e74cf98e940f9c1025b1b0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731806860743 2024-11-17T01:27:45,475 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ce3155069aab490f87f0bf340ef1987f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1731806861359 2024-11-17T01:27:45,475 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
a0efb2fe6fa7474c94ace387042ea164, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=49, earliestPutTs=1731806861359 2024-11-17T01:27:45,475 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 73e44cbc129a4c9f84ae8c5a6c32da7c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1731806863490 2024-11-17T01:27:45,475 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2fc6684c8f9c4d768605cea6449db4ad, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1731806863490 2024-11-17T01:27:45,483 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#B#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:45,484 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/0be95ad1bba244bc8e28f463b7aee387 is 50, key is test_row_0/B:col10/1731806864609/Put/seqid=0 2024-11-17T01:27:45,493 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#A#compaction#226 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:45,493 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/f9f24e90e4b04dfdb2c78d23b6799123 is 50, key is test_row_0/A:col10/1731806864609/Put/seqid=0 2024-11-17T01:27:45,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742096_1272 (size=12139) 2024-11-17T01:27:45,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742097_1273 (size=12139) 2024-11-17T01:27:45,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-17T01:27:45,530 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-17T01:27:45,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:45,530 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-17T01:27:45,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:45,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:45,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:45,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:45,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:45,531 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:45,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/e564ede70c56485d8498387b54b10e25 is 50, key is test_row_0/A:col10/1731806864617/Put/seqid=0 2024-11-17T01:27:45,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742098_1274 (size=12001) 2024-11-17T01:27:45,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:45,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:45,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806925702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,736 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806925734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806925736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806925736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806925736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:45,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806925805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:45,900 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/0be95ad1bba244bc8e28f463b7aee387 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0be95ad1bba244bc8e28f463b7aee387 2024-11-17T01:27:45,904 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/B of 8d60f9787865bae9bdb70c9c59e45f35 into 0be95ad1bba244bc8e28f463b7aee387(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:45,904 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:45,904 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/B, priority=12, startTime=1731806865472; duration=0sec 2024-11-17T01:27:45,905 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:45,905 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:B 2024-11-17T01:27:45,905 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:27:45,906 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:27:45,906 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/C is initiating minor compaction (all files) 2024-11-17T01:27:45,906 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/C in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:45,906 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/60d53496adc041fba3ac4266c20a4f8f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6d046b90d15f4af59b6549dcd7006129, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/4069a118158241e980b93a84857ff005, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/c46844b63595425c80404f8b868ab4d6] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=46.9 K 2024-11-17T01:27:45,906 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 60d53496adc041fba3ac4266c20a4f8f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731806860720 2024-11-17T01:27:45,907 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d046b90d15f4af59b6549dcd7006129, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1731806860743 2024-11-17T01:27:45,907 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4069a118158241e980b93a84857ff005, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=49, earliestPutTs=1731806861359 2024-11-17T01:27:45,907 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c46844b63595425c80404f8b868ab4d6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1731806863490 2024-11-17T01:27:45,912 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/f9f24e90e4b04dfdb2c78d23b6799123 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/f9f24e90e4b04dfdb2c78d23b6799123 2024-11-17T01:27:45,917 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/A of 8d60f9787865bae9bdb70c9c59e45f35 into f9f24e90e4b04dfdb2c78d23b6799123(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:45,917 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:45,917 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/A, priority=12, startTime=1731806865472; duration=0sec 2024-11-17T01:27:45,917 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:45,917 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:A 2024-11-17T01:27:45,917 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#C#compaction#228 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:45,918 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/b4cd48cddd0843ba9bf85316fcdb4779 is 50, key is test_row_0/C:col10/1731806864609/Put/seqid=0 2024-11-17T01:27:45,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742099_1275 (size=12139) 2024-11-17T01:27:45,938 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/e564ede70c56485d8498387b54b10e25 2024-11-17T01:27:45,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/0c934a2c905b4431b50131b2f45ede7d is 50, key is test_row_0/B:col10/1731806864617/Put/seqid=0 2024-11-17T01:27:45,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742100_1276 (size=12001) 2024-11-17T01:27:46,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:46,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806926008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:46,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-17T01:27:46,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:46,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806926311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:46,325 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/b4cd48cddd0843ba9bf85316fcdb4779 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b4cd48cddd0843ba9bf85316fcdb4779 2024-11-17T01:27:46,329 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/C of 8d60f9787865bae9bdb70c9c59e45f35 into b4cd48cddd0843ba9bf85316fcdb4779(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:46,329 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:46,329 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/C, priority=12, startTime=1731806865472; duration=0sec 2024-11-17T01:27:46,329 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:46,329 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:C 2024-11-17T01:27:46,348 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/0c934a2c905b4431b50131b2f45ede7d 2024-11-17T01:27:46,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/64c7f80b001c4258ba51a2420b4edc94 is 50, key is test_row_0/C:col10/1731806864617/Put/seqid=0 2024-11-17T01:27:46,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742101_1277 (size=12001) 2024-11-17T01:27:46,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:46,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806926741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:46,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:46,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806926742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:46,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:46,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806926742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:46,746 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:46,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806926745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:46,758 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/64c7f80b001c4258ba51a2420b4edc94 2024-11-17T01:27:46,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/e564ede70c56485d8498387b54b10e25 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e564ede70c56485d8498387b54b10e25 2024-11-17T01:27:46,765 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e564ede70c56485d8498387b54b10e25, entries=150, sequenceid=85, filesize=11.7 K 2024-11-17T01:27:46,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/0c934a2c905b4431b50131b2f45ede7d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0c934a2c905b4431b50131b2f45ede7d 2024-11-17T01:27:46,769 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0c934a2c905b4431b50131b2f45ede7d, entries=150, sequenceid=85, filesize=11.7 K 2024-11-17T01:27:46,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/64c7f80b001c4258ba51a2420b4edc94 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/64c7f80b001c4258ba51a2420b4edc94 2024-11-17T01:27:46,773 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/64c7f80b001c4258ba51a2420b4edc94, entries=150, sequenceid=85, filesize=11.7 K 2024-11-17T01:27:46,774 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 8d60f9787865bae9bdb70c9c59e45f35 in 1244ms, sequenceid=85, compaction requested=false 2024-11-17T01:27:46,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:46,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:46,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-17T01:27:46,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-17T01:27:46,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-17T01:27:46,776 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8540 sec 2024-11-17T01:27:46,777 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 1.8570 sec 2024-11-17T01:27:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:46,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-17T01:27:46,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:46,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:46,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:46,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:46,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:46,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:46,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/0f2f55d97e7642e9867857e1523420c5 is 50, key is test_row_0/A:col10/1731806865698/Put/seqid=0 2024-11-17T01:27:46,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742102_1278 (size=14341) 2024-11-17T01:27:46,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:46,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806926834, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:46,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:46,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806926936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:47,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-17T01:27:47,024 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-17T01:27:47,024 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:47,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-17T01:27:47,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-17T01:27:47,026 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:47,026 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:47,026 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:47,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-17T01:27:47,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:47,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806927140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:47,177 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:47,178 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-17T01:27:47,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:47,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,178 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:47,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/0f2f55d97e7642e9867857e1523420c5 2024-11-17T01:27:47,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/b7edb6f5ebd44e5e8b63e1d59828352c is 50, key is test_row_0/B:col10/1731806865698/Put/seqid=0 2024-11-17T01:27:47,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742103_1279 (size=12001) 2024-11-17T01:27:47,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-17T01:27:47,330 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:47,330 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-17T01:27:47,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:47,330 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,331 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,331 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:47,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806927442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:47,482 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:47,483 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-17T01:27:47,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:47,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:47,483 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-17T01:27:47,634 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:47,634 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-17T01:27:47,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:47,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,635 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/b7edb6f5ebd44e5e8b63e1d59828352c 2024-11-17T01:27:47,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/8c495418a15743fb871a92446fc9a62d is 50, key is test_row_0/C:col10/1731806865698/Put/seqid=0 2024-11-17T01:27:47,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742104_1280 (size=12001) 2024-11-17T01:27:47,786 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:47,787 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-17T01:27:47,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
as already flushing 2024-11-17T01:27:47,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,787 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,939 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:47,939 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-17T01:27:47,939 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:47,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:47,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:47,949 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:47,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806927947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,046 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/8c495418a15743fb871a92446fc9a62d 2024-11-17T01:27:48,051 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/0f2f55d97e7642e9867857e1523420c5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0f2f55d97e7642e9867857e1523420c5 2024-11-17T01:27:48,054 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0f2f55d97e7642e9867857e1523420c5, entries=200, sequenceid=115, filesize=14.0 K 2024-11-17T01:27:48,055 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/b7edb6f5ebd44e5e8b63e1d59828352c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/b7edb6f5ebd44e5e8b63e1d59828352c 2024-11-17T01:27:48,058 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/b7edb6f5ebd44e5e8b63e1d59828352c, entries=150, sequenceid=115, filesize=11.7 K 2024-11-17T01:27:48,059 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/8c495418a15743fb871a92446fc9a62d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/8c495418a15743fb871a92446fc9a62d 2024-11-17T01:27:48,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/8c495418a15743fb871a92446fc9a62d, entries=150, sequenceid=115, filesize=11.7 K 2024-11-17T01:27:48,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 8d60f9787865bae9bdb70c9c59e45f35 in 1246ms, sequenceid=115, compaction requested=true 2024-11-17T01:27:48,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:48,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:48,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:48,063 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:48,063 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:48,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:48,064 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:48,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:48,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:48,065 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:48,065 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:48,065 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/B is initiating minor compaction (all files) 2024-11-17T01:27:48,065 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/A is initiating minor compaction (all files) 2024-11-17T01:27:48,065 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/B in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:48,065 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/A in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:48,065 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0be95ad1bba244bc8e28f463b7aee387, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0c934a2c905b4431b50131b2f45ede7d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/b7edb6f5ebd44e5e8b63e1d59828352c] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=35.3 K 2024-11-17T01:27:48,065 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/f9f24e90e4b04dfdb2c78d23b6799123, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e564ede70c56485d8498387b54b10e25, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0f2f55d97e7642e9867857e1523420c5] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=37.6 K 2024-11-17T01:27:48,065 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0be95ad1bba244bc8e28f463b7aee387, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1731806863490 2024-11-17T01:27:48,065 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting f9f24e90e4b04dfdb2c78d23b6799123, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1731806863490 2024-11-17T01:27:48,065 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c934a2c905b4431b50131b2f45ede7d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731806864613 2024-11-17T01:27:48,065 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting e564ede70c56485d8498387b54b10e25, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731806864613 2024-11-17T01:27:48,066 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting b7edb6f5ebd44e5e8b63e1d59828352c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731806865698 2024-11-17T01:27:48,066 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0f2f55d97e7642e9867857e1523420c5, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731806865695 
2024-11-17T01:27:48,072 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#A#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:48,072 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/0e9a272c93d74c1090dee17aab7807c0 is 50, key is test_row_0/A:col10/1731806865698/Put/seqid=0 2024-11-17T01:27:48,076 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#B#compaction#235 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:48,077 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/c33215e2da6b469ba04f278ceaf468a9 is 50, key is test_row_0/B:col10/1731806865698/Put/seqid=0 2024-11-17T01:27:48,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742105_1281 (size=12241) 2024-11-17T01:27:48,091 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742106_1282 (size=12241) 2024-11-17T01:27:48,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-17T01:27:48,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:48,092 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-17T01:27:48,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:48,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:48,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:48,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:48,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:48,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:48,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/30f9b6d9388d4c269248e3a3253b4ead is 50, key is test_row_0/A:col10/1731806866827/Put/seqid=0 2024-11-17T01:27:48,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742107_1283 (size=12001) 2024-11-17T01:27:48,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-17T01:27:48,493 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/0e9a272c93d74c1090dee17aab7807c0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0e9a272c93d74c1090dee17aab7807c0 2024-11-17T01:27:48,496 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/c33215e2da6b469ba04f278ceaf468a9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c33215e2da6b469ba04f278ceaf468a9 2024-11-17T01:27:48,499 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/A of 8d60f9787865bae9bdb70c9c59e45f35 into 
0e9a272c93d74c1090dee17aab7807c0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:48,499 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:48,499 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/A, priority=13, startTime=1731806868063; duration=0sec 2024-11-17T01:27:48,499 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:48,499 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:A 2024-11-17T01:27:48,499 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:48,500 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:48,500 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/30f9b6d9388d4c269248e3a3253b4ead 2024-11-17T01:27:48,500 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/C is initiating minor compaction (all files) 2024-11-17T01:27:48,500 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/C in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:48,501 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b4cd48cddd0843ba9bf85316fcdb4779, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/64c7f80b001c4258ba51a2420b4edc94, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/8c495418a15743fb871a92446fc9a62d] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=35.3 K 2024-11-17T01:27:48,501 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4cd48cddd0843ba9bf85316fcdb4779, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1731806863490 2024-11-17T01:27:48,501 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64c7f80b001c4258ba51a2420b4edc94, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1731806864613 2024-11-17T01:27:48,501 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c495418a15743fb871a92446fc9a62d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731806865698 2024-11-17T01:27:48,517 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/B of 8d60f9787865bae9bdb70c9c59e45f35 into c33215e2da6b469ba04f278ceaf468a9(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:48,517 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#C#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:48,517 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:48,517 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/B, priority=13, startTime=1731806868063; duration=0sec 2024-11-17T01:27:48,517 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:48,517 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:B 2024-11-17T01:27:48,517 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/91c401eafb1d4a7bb5075e6d133529be is 50, key is test_row_0/C:col10/1731806865698/Put/seqid=0 2024-11-17T01:27:48,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/d0cbd119e50f4b3aaf7ef0b0e54906ed is 50, key is test_row_0/B:col10/1731806866827/Put/seqid=0 2024-11-17T01:27:48,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742108_1284 (size=12241) 2024-11-17T01:27:48,538 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/91c401eafb1d4a7bb5075e6d133529be as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/91c401eafb1d4a7bb5075e6d133529be 2024-11-17T01:27:48,543 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/C of 8d60f9787865bae9bdb70c9c59e45f35 into 91c401eafb1d4a7bb5075e6d133529be(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:48,543 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:48,543 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/C, priority=13, startTime=1731806868064; duration=0sec 2024-11-17T01:27:48,543 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:48,543 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:C 2024-11-17T01:27:48,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742109_1285 (size=12001) 2024-11-17T01:27:48,550 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/d0cbd119e50f4b3aaf7ef0b0e54906ed 2024-11-17T01:27:48,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/f3e9b7d4229e426f847a3c8f984e1c69 is 50, key is test_row_0/C:col10/1731806866827/Put/seqid=0 2024-11-17T01:27:48,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742110_1286 (size=12001) 2024-11-17T01:27:48,580 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=124 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/f3e9b7d4229e426f847a3c8f984e1c69 2024-11-17T01:27:48,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/30f9b6d9388d4c269248e3a3253b4ead as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/30f9b6d9388d4c269248e3a3253b4ead 2024-11-17T01:27:48,589 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/30f9b6d9388d4c269248e3a3253b4ead, entries=150, sequenceid=124, filesize=11.7 K 2024-11-17T01:27:48,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 
{event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/d0cbd119e50f4b3aaf7ef0b0e54906ed as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/d0cbd119e50f4b3aaf7ef0b0e54906ed 2024-11-17T01:27:48,595 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/d0cbd119e50f4b3aaf7ef0b0e54906ed, entries=150, sequenceid=124, filesize=11.7 K 2024-11-17T01:27:48,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/f3e9b7d4229e426f847a3c8f984e1c69 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/f3e9b7d4229e426f847a3c8f984e1c69 2024-11-17T01:27:48,605 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/f3e9b7d4229e426f847a3c8f984e1c69, entries=150, sequenceid=124, filesize=11.7 K 2024-11-17T01:27:48,606 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=0 B/0 for 8d60f9787865bae9bdb70c9c59e45f35 in 514ms, sequenceid=124, compaction requested=false 2024-11-17T01:27:48,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:48,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:48,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-17T01:27:48,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-17T01:27:48,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-17T01:27:48,609 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5820 sec 2024-11-17T01:27:48,610 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.5850 sec 2024-11-17T01:27:48,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:48,757 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:27:48,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:48,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:48,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:48,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:48,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:48,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:48,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/841059ffc47e4c4abe38cb5e766a9d2f is 50, key is test_row_0/A:col10/1731806868750/Put/seqid=0 2024-11-17T01:27:48,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742111_1287 (size=12151) 2024-11-17T01:27:48,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/841059ffc47e4c4abe38cb5e766a9d2f 2024-11-17T01:27:48,775 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/df818be45e5e4c7cb5888618175994a7 is 50, key is test_row_0/B:col10/1731806868750/Put/seqid=0 2024-11-17T01:27:48,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742112_1288 
(size=12151) 2024-11-17T01:27:48,780 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/df818be45e5e4c7cb5888618175994a7 2024-11-17T01:27:48,785 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:48,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806928781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,786 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:48,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806928782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:48,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806928784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,788 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/5f6d27f5ea1146e9b1674cbc26b63d26 is 50, key is test_row_0/C:col10/1731806868750/Put/seqid=0 2024-11-17T01:27:48,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:48,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806928785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742113_1289 (size=12151) 2024-11-17T01:27:48,888 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:48,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806928886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:48,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806928887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,890 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:48,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806928888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:48,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806928891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:48,956 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:48,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806928955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806929089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806929090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806929092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,096 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806929094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-17T01:27:49,129 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-17T01:27:49,130 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:49,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-17T01:27:49,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-17T01:27:49,132 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:49,132 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:49,132 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:49,197 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/5f6d27f5ea1146e9b1674cbc26b63d26 2024-11-17T01:27:49,201 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/841059ffc47e4c4abe38cb5e766a9d2f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/841059ffc47e4c4abe38cb5e766a9d2f 2024-11-17T01:27:49,204 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/841059ffc47e4c4abe38cb5e766a9d2f, entries=150, sequenceid=138, filesize=11.9 K 2024-11-17T01:27:49,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/df818be45e5e4c7cb5888618175994a7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df818be45e5e4c7cb5888618175994a7 2024-11-17T01:27:49,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df818be45e5e4c7cb5888618175994a7, entries=150, sequenceid=138, filesize=11.9 K 2024-11-17T01:27:49,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/5f6d27f5ea1146e9b1674cbc26b63d26 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5f6d27f5ea1146e9b1674cbc26b63d26 2024-11-17T01:27:49,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5f6d27f5ea1146e9b1674cbc26b63d26, entries=150, sequenceid=138, filesize=11.9 K 2024-11-17T01:27:49,212 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8d60f9787865bae9bdb70c9c59e45f35 in 456ms, sequenceid=138, compaction requested=true 2024-11-17T01:27:49,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:49,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:49,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:49,213 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:49,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:49,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:49,213 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-17T01:27:49,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:49,213 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:49,214 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:49,214 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:49,214 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/A is initiating minor compaction (all files) 2024-11-17T01:27:49,214 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/B is initiating minor compaction (all files) 2024-11-17T01:27:49,214 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/B in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:49,214 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/A in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:49,214 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0e9a272c93d74c1090dee17aab7807c0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/30f9b6d9388d4c269248e3a3253b4ead, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/841059ffc47e4c4abe38cb5e766a9d2f] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=35.5 K 2024-11-17T01:27:49,214 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c33215e2da6b469ba04f278ceaf468a9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/d0cbd119e50f4b3aaf7ef0b0e54906ed, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df818be45e5e4c7cb5888618175994a7] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=35.5 K 2024-11-17T01:27:49,214 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0e9a272c93d74c1090dee17aab7807c0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731806865698 2024-11-17T01:27:49,214 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c33215e2da6b469ba04f278ceaf468a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731806865698 2024-11-17T01:27:49,214 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 30f9b6d9388d4c269248e3a3253b4ead, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1731806866827 2024-11-17T01:27:49,215 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting d0cbd119e50f4b3aaf7ef0b0e54906ed, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1731806866827 2024-11-17T01:27:49,215 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 841059ffc47e4c4abe38cb5e766a9d2f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1731806868750 2024-11-17T01:27:49,215 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting df818be45e5e4c7cb5888618175994a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1731806868750 2024-11-17T01:27:49,220 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#A#compaction#243 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:49,221 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/2e7bb7143eb946feadc6b655fbf303ee is 50, key is test_row_0/A:col10/1731806868750/Put/seqid=0 2024-11-17T01:27:49,221 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#B#compaction#244 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:49,221 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/bac73e7c82bb47cea576e47183ccbeb2 is 50, key is test_row_0/B:col10/1731806868750/Put/seqid=0 2024-11-17T01:27:49,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742114_1290 (size=12493) 2024-11-17T01:27:49,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742115_1291 (size=12493) 2024-11-17T01:27:49,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-17T01:27:49,270 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/2e7bb7143eb946feadc6b655fbf303ee as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2e7bb7143eb946feadc6b655fbf303ee 2024-11-17T01:27:49,274 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/A of 8d60f9787865bae9bdb70c9c59e45f35 into 2e7bb7143eb946feadc6b655fbf303ee(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
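The short-compactions thread above commits the rewritten HFile from .tmp into the A store and reports the compaction complete. From a client, the same kind of compaction can be requested and observed through the public Admin API; a hedged sketch, assuming a standard client Configuration pointing at this cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName tn = TableName.valueOf("TestAcidGuarantees");
            admin.compact(tn);                              // queue a minor compaction for the table
            CompactionState state = admin.getCompactionState(tn); // NONE, MINOR, MAJOR, ...
            System.out.println("compaction state: " + state);
        }
    }
}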
2024-11-17T01:27:49,275 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:49,275 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/A, priority=13, startTime=1731806869213; duration=0sec 2024-11-17T01:27:49,275 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:49,275 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:A 2024-11-17T01:27:49,275 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:49,276 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36393 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:49,276 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/C is initiating minor compaction (all files) 2024-11-17T01:27:49,276 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/C in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:49,276 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/91c401eafb1d4a7bb5075e6d133529be, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/f3e9b7d4229e426f847a3c8f984e1c69, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5f6d27f5ea1146e9b1674cbc26b63d26] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=35.5 K 2024-11-17T01:27:49,276 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 91c401eafb1d4a7bb5075e6d133529be, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731806865698 2024-11-17T01:27:49,277 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3e9b7d4229e426f847a3c8f984e1c69, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=124, earliestPutTs=1731806866827 2024-11-17T01:27:49,277 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f6d27f5ea1146e9b1674cbc26b63d26, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1731806868750 2024-11-17T01:27:49,283 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#C#compaction#245 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:49,283 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/0d0f0b85773d40b18c35d67d20004bc7 is 50, key is test_row_0/C:col10/1731806868750/Put/seqid=0 2024-11-17T01:27:49,283 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-17T01:27:49,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:49,284 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-17T01:27:49,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:49,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:49,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:49,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:49,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:49,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:49,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742116_1292 (size=12493) 2024-11-17T01:27:49,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/8610526c5c3841cebfc9ab853ff05d7b is 50, key is test_row_0/A:col10/1731806868782/Put/seqid=0 2024-11-17T01:27:49,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742117_1293 
(size=12151) 2024-11-17T01:27:49,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:49,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:49,403 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806929400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806929401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,404 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806929401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806929402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-17T01:27:49,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806929504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806929504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,506 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806929505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806929506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,630 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/bac73e7c82bb47cea576e47183ccbeb2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/bac73e7c82bb47cea576e47183ccbeb2 2024-11-17T01:27:49,634 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/B of 8d60f9787865bae9bdb70c9c59e45f35 into bac73e7c82bb47cea576e47183ccbeb2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
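The RegionTooBusyException entries above show writes being rejected because the region's memstore is over its blocking limit of 512.0 K; that limit is the per-region flush size multiplied by the block multiplier. A sketch of how a test configuration might shrink hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier to reach a ~512 K blocking size (the specific values are illustrative, not taken from this run's configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreBlockingSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush at 128 K (default is 128 MB)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // with these illustrative values: 131072 * 4 = 524288 bytes, i.e. the 512 K limit seen in the log
        System.out.println("writes blocked once a region's memstore exceeds " + blockingLimit + " bytes");
    }
}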
2024-11-17T01:27:49,634 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:49,635 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/B, priority=13, startTime=1731806869213; duration=0sec 2024-11-17T01:27:49,635 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:49,635 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:B 2024-11-17T01:27:49,694 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/0d0f0b85773d40b18c35d67d20004bc7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0d0f0b85773d40b18c35d67d20004bc7 2024-11-17T01:27:49,694 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/8610526c5c3841cebfc9ab853ff05d7b 2024-11-17T01:27:49,698 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/C of 8d60f9787865bae9bdb70c9c59e45f35 into 0d0f0b85773d40b18c35d67d20004bc7(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
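The cells named in the log (row test_row_0, families A/B/C, qualifier col10) come from single Puts that touch all three column families. A hedged sketch of that write pattern with the standard client API; RegionTooBusyException is an IOException that the client normally retries internally, so it surfaces to the caller only once retries are exhausted:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPut {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // one Put covering all three families, matching the row/column names in the log
            Put put = new Put(Bytes.toBytes("test_row_0"));
            byte[] value = Bytes.toBytes("value"); // placeholder payload
            for (String family : new String[] {"A", "B", "C"}) {
                put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
            }
            // may be delayed or rejected with RegionTooBusyException while the memstore is over its limit
            table.put(put);
        }
    }
}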
2024-11-17T01:27:49,698 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:49,698 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/C, priority=13, startTime=1731806869213; duration=0sec 2024-11-17T01:27:49,698 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:49,698 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:C 2024-11-17T01:27:49,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/8ab826a9c1dc4d69858c1dbfacc12eae is 50, key is test_row_0/B:col10/1731806868782/Put/seqid=0 2024-11-17T01:27:49,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742118_1294 (size=12151) 2024-11-17T01:27:49,709 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806929707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806929707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806929708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,711 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/8ab826a9c1dc4d69858c1dbfacc12eae 2024-11-17T01:27:49,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:49,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806929710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:49,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/9db5b1996f1a4f4e92e83977deeeb0d2 is 50, key is test_row_0/C:col10/1731806868782/Put/seqid=0 2024-11-17T01:27:49,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742119_1295 (size=12151) 2024-11-17T01:27:49,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-17T01:27:50,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806930010, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,012 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806930011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,013 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806930012, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,016 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806930015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,122 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=164 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/9db5b1996f1a4f4e92e83977deeeb0d2 2024-11-17T01:27:50,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/8610526c5c3841cebfc9ab853ff05d7b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8610526c5c3841cebfc9ab853ff05d7b 2024-11-17T01:27:50,130 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8610526c5c3841cebfc9ab853ff05d7b, entries=150, sequenceid=164, filesize=11.9 K 2024-11-17T01:27:50,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/8ab826a9c1dc4d69858c1dbfacc12eae as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/8ab826a9c1dc4d69858c1dbfacc12eae 2024-11-17T01:27:50,134 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/8ab826a9c1dc4d69858c1dbfacc12eae, entries=150, sequenceid=164, filesize=11.9 K 2024-11-17T01:27:50,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/9db5b1996f1a4f4e92e83977deeeb0d2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/9db5b1996f1a4f4e92e83977deeeb0d2 2024-11-17T01:27:50,138 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/9db5b1996f1a4f4e92e83977deeeb0d2, entries=150, sequenceid=164, filesize=11.9 K 2024-11-17T01:27:50,139 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 8d60f9787865bae9bdb70c9c59e45f35 in 855ms, sequenceid=164, compaction requested=false 2024-11-17T01:27:50,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:50,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:50,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-17T01:27:50,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-17T01:27:50,141 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-17T01:27:50,141 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0080 sec 2024-11-17T01:27:50,142 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.0110 sec 2024-11-17T01:27:50,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-17T01:27:50,234 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-17T01:27:50,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:50,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-17T01:27:50,238 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:50,239 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-17T01:27:50,239 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:50,239 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:50,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-17T01:27:50,390 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,391 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-17T01:27:50,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:50,391 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:27:50,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:50,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:50,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:50,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:50,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:50,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:50,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/448cb045302f498784f7230366bfb5a1 is 50, key is test_row_0/A:col10/1731806869399/Put/seqid=0 2024-11-17T01:27:50,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742120_1296 (size=9757) 
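The FlushTableProcedure/FlushRegionProcedure pairs above (pid=77/78, then 79/80) are what the master runs when a client asks for a table flush. A hedged sketch of issuing that request through the Admin API, assuming a standard client Configuration for this cluster:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // typically returns once the master reports the flush procedure finished,
            // mirroring the "Operation: FLUSH ... procId: 77 completed" entry above
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}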
2024-11-17T01:27:50,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:50,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:50,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-17T01:27:50,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806930564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806930564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806930566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806930567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806930668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,669 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806930668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,670 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806930669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,672 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806930671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,799 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/448cb045302f498784f7230366bfb5a1 2024-11-17T01:27:50,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/907c3ffc5be14afe8f4e5cdd36f07809 is 50, key is test_row_0/B:col10/1731806869399/Put/seqid=0 2024-11-17T01:27:50,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742121_1297 (size=9757) 2024-11-17T01:27:50,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-17T01:27:50,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806930870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806930871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,872 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806930871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806930874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,976 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:50,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806930974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:50,977 DEBUG [Thread-1196 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4143 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:27:51,174 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806931173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806931174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,176 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806931174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806931178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,212 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/907c3ffc5be14afe8f4e5cdd36f07809 2024-11-17T01:27:51,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/ac65ea82ab924295b133c021f969ff16 is 50, key is test_row_0/C:col10/1731806869399/Put/seqid=0 2024-11-17T01:27:51,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742122_1298 (size=9757) 2024-11-17T01:27:51,225 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=177 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/ac65ea82ab924295b133c021f969ff16 2024-11-17T01:27:51,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/448cb045302f498784f7230366bfb5a1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/448cb045302f498784f7230366bfb5a1 2024-11-17T01:27:51,232 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/448cb045302f498784f7230366bfb5a1, entries=100, sequenceid=177, filesize=9.5 K 2024-11-17T01:27:51,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/907c3ffc5be14afe8f4e5cdd36f07809 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/907c3ffc5be14afe8f4e5cdd36f07809 2024-11-17T01:27:51,236 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/907c3ffc5be14afe8f4e5cdd36f07809, entries=100, sequenceid=177, filesize=9.5 K 2024-11-17T01:27:51,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/ac65ea82ab924295b133c021f969ff16 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ac65ea82ab924295b133c021f969ff16 2024-11-17T01:27:51,241 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ac65ea82ab924295b133c021f969ff16, entries=100, sequenceid=177, filesize=9.5 K 2024-11-17T01:27:51,241 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8d60f9787865bae9bdb70c9c59e45f35 in 850ms, sequenceid=177, compaction requested=true 2024-11-17T01:27:51,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:51,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:51,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-17T01:27:51,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-17T01:27:51,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-17T01:27:51,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0030 sec 2024-11-17T01:27:51,245 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.0080 sec 2024-11-17T01:27:51,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-17T01:27:51,342 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-17T01:27:51,343 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:51,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-17T01:27:51,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-17T01:27:51,344 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:51,344 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:51,344 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:51,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-17T01:27:51,495 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-17T01:27:51,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:51,496 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-17T01:27:51,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:51,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:51,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:51,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:51,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:51,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:51,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/9fc35cefd4fa4c9dbf79b999a59a371e is 50, key is test_row_0/A:col10/1731806870563/Put/seqid=0 2024-11-17T01:27:51,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742123_1299 (size=12151) 2024-11-17T01:27:51,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-17T01:27:51,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:51,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:51,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806931686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806931687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806931687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806931688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806931791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806931791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806931791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:51,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806931791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:51,904 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/9fc35cefd4fa4c9dbf79b999a59a371e
2024-11-17T01:27:51,910 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/70a76224f89b492c9033f9446eaf0ab2 is 50, key is test_row_0/B:col10/1731806870563/Put/seqid=0
2024-11-17T01:27:51,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742124_1300 (size=12151)
2024-11-17T01:27:51,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-11-17T01:27:51,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806931993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806931993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806931993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:51,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:51,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806931994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806932296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806932296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,299 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806932297, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:52,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806932298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:52,326 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/70a76224f89b492c9033f9446eaf0ab2
2024-11-17T01:27:52,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/24f9db720fa1449485a023f08c74ed46 is 50, key is test_row_0/C:col10/1731806870563/Put/seqid=0
2024-11-17T01:27:52,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742125_1301 (size=12151)
2024-11-17T01:27:52,337 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/24f9db720fa1449485a023f08c74ed46
2024-11-17T01:27:52,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/9fc35cefd4fa4c9dbf79b999a59a371e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9fc35cefd4fa4c9dbf79b999a59a371e
2024-11-17T01:27:52,345 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9fc35cefd4fa4c9dbf79b999a59a371e, entries=150, sequenceid=202, filesize=11.9 K
2024-11-17T01:27:52,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/70a76224f89b492c9033f9446eaf0ab2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/70a76224f89b492c9033f9446eaf0ab2
2024-11-17T01:27:52,349 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/70a76224f89b492c9033f9446eaf0ab2, entries=150, sequenceid=202, filesize=11.9 K
2024-11-17T01:27:52,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/24f9db720fa1449485a023f08c74ed46 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/24f9db720fa1449485a023f08c74ed46
2024-11-17T01:27:52,354 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/24f9db720fa1449485a023f08c74ed46, entries=150, sequenceid=202, filesize=11.9 K
2024-11-17T01:27:52,356 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 8d60f9787865bae9bdb70c9c59e45f35 in 860ms, sequenceid=202, compaction requested=true
2024-11-17T01:27:52,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35:
2024-11-17T01:27:52,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.
2024-11-17T01:27:52,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82
2024-11-17T01:27:52,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=82
2024-11-17T01:27:52,358 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81
2024-11-17T01:27:52,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0130 sec
2024-11-17T01:27:52,360 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.0160 sec
2024-11-17T01:27:52,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81
2024-11-17T01:27:52,447 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed
2024-11-17T01:27:52,448 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-17T01:27:52,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees
2024-11-17T01:27:52,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83
2024-11-17T01:27:52,449 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-17T01:27:52,449 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T01:27:52,450 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T01:27:52,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83
2024-11-17T01:27:52,601 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:52,602 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84
2024-11-17T01:27:52,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.
2024-11-17T01:27:52,602 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-17T01:27:52,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A
2024-11-17T01:27:52,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:27:52,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B
2024-11-17T01:27:52,602 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:27:52,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C
2024-11-17T01:27:52,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:27:52,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ee2bf5ae639d4fa9b631e802d060d729 is 50, key is test_row_0/A:col10/1731806871686/Put/seqid=0
2024-11-17T01:27:52,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742126_1302 (size=12151)
2024-11-17T01:27:52,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83
2024-11-17T01:27:52,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing
2024-11-17T01:27:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35
2024-11-17T01:27:52,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806932823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806932825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806932826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,829 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806932826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806932927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806932929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:52,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806932930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:52,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:52,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806932930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:53,011 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ee2bf5ae639d4fa9b631e802d060d729
2024-11-17T01:27:53,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/4042e342c04043db88ffb14eb6056b92 is 50, key is test_row_0/B:col10/1731806871686/Put/seqid=0
2024-11-17T01:27:53,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742127_1303 (size=12151)
2024-11-17T01:27:53,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83
2024-11-17T01:27:53,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806933132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,134 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806933133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,135 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806933133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806933134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,422 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/4042e342c04043db88ffb14eb6056b92 2024-11-17T01:27:53,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/0b0d1f5e28ce4f7896efdef4bd42234c is 50, key is test_row_0/C:col10/1731806871686/Put/seqid=0 2024-11-17T01:27:53,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742128_1304 (size=12151) 2024-11-17T01:27:53,437 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806933435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806933437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806933437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,440 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806933437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-17T01:27:53,836 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/0b0d1f5e28ce4f7896efdef4bd42234c 2024-11-17T01:27:53,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ee2bf5ae639d4fa9b631e802d060d729 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ee2bf5ae639d4fa9b631e802d060d729 2024-11-17T01:27:53,844 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ee2bf5ae639d4fa9b631e802d060d729, entries=150, sequenceid=213, filesize=11.9 K 2024-11-17T01:27:53,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/4042e342c04043db88ffb14eb6056b92 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/4042e342c04043db88ffb14eb6056b92 2024-11-17T01:27:53,848 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/4042e342c04043db88ffb14eb6056b92, entries=150, sequenceid=213, filesize=11.9 K 2024-11-17T01:27:53,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/0b0d1f5e28ce4f7896efdef4bd42234c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0b0d1f5e28ce4f7896efdef4bd42234c
2024-11-17T01:27:53,852 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0b0d1f5e28ce4f7896efdef4bd42234c, entries=150, sequenceid=213, filesize=11.9 K
2024-11-17T01:27:53,852 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 8d60f9787865bae9bdb70c9c59e45f35 in 1250ms, sequenceid=213, compaction requested=true
2024-11-17T01:27:53,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35:
2024-11-17T01:27:53,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.
2024-11-17T01:27:53,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84
2024-11-17T01:27:53,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=84
2024-11-17T01:27:53,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83
2024-11-17T01:27:53,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4040 sec
2024-11-17T01:27:53,856 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.4070 sec
2024-11-17T01:27:53,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35
2024-11-17T01:27:53,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-11-17T01:27:53,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A
2024-11-17T01:27:53,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:27:53,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B
2024-11-17T01:27:53,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:27:53,943 DEBUG [MemStoreFlusher.0 {}]
regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:53,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:53,947 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/d911e841f6584da2a21d87a3cdb01259 is 50, key is test_row_0/A:col10/1731806872816/Put/seqid=0 2024-11-17T01:27:53,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806933947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,950 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806933947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742129_1305 (size=12151) 2024-11-17T01:27:53,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806933950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:53,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:53,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806933950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806934051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806934051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806934054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806934054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806934253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806934254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806934257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,259 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:54,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806934257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:54,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/d911e841f6584da2a21d87a3cdb01259
2024-11-17T01:27:54,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/fdb6d53f14554cceb46009fb82e8f49d is 50, key is test_row_0/B:col10/1731806872816/Put/seqid=0
2024-11-17T01:27:54,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742130_1306 (size=12151)
2024-11-17T01:27:54,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83
2024-11-17T01:27:54,552 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed
2024-11-17T01:27:54,553 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-17T01:27:54,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees
2024-11-17T01:27:54,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85
2024-11-17T01:27:54,554 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-17T01:27:54,555 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T01:27:54,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T01:27:54,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806934558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,561 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806934558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806934561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:27:54,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806934561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:54,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85
2024-11-17T01:27:54,706 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:54,706 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86
2024-11-17T01:27:54,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.
2024-11-17T01:27:54,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing
2024-11-17T01:27:54,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.
2024-11-17T01:27:54,707 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86
java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:54,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:54,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:27:54,786 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/fdb6d53f14554cceb46009fb82e8f49d
2024-11-17T01:27:54,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/ea67b177fae044f4b2b32836bc939799 is 50, key is test_row_0/C:col10/1731806872816/Put/seqid=0
2024-11-17T01:27:54,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742131_1307 (size=12151)
2024-11-17T01:27:54,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85
2024-11-17T01:27:54,858 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:27:54,858 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86
2024-11-17T01:27:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.
2024-11-17T01:27:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing
2024-11-17T01:27:54,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.
2024-11-17T01:27:54,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86
java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:54,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:54,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:54,993 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:54,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46454 deadline: 1731806934991, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:54,994 DEBUG [Thread-1196 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., hostname=04f7e7347dc7,37721,1731806791503, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:27:55,010 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:55,010 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-17T01:27:55,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:55,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:55,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:55,011 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:55,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:55,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:55,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:55,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806935061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:55,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:55,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806935061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:55,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:55,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806935064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:55,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:55,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806935065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:55,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-17T01:27:55,162 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:55,163 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-17T01:27:55,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:55,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:55,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:55,163 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:55,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:55,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:55,196 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/ea67b177fae044f4b2b32836bc939799 2024-11-17T01:27:55,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/d911e841f6584da2a21d87a3cdb01259 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/d911e841f6584da2a21d87a3cdb01259 2024-11-17T01:27:55,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/d911e841f6584da2a21d87a3cdb01259, entries=150, sequenceid=240, filesize=11.9 K 2024-11-17T01:27:55,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/fdb6d53f14554cceb46009fb82e8f49d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/fdb6d53f14554cceb46009fb82e8f49d 2024-11-17T01:27:55,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/fdb6d53f14554cceb46009fb82e8f49d, entries=150, sequenceid=240, filesize=11.9 K 2024-11-17T01:27:55,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/ea67b177fae044f4b2b32836bc939799 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ea67b177fae044f4b2b32836bc939799 2024-11-17T01:27:55,210 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ea67b177fae044f4b2b32836bc939799, entries=150, sequenceid=240, filesize=11.9 K 2024-11-17T01:27:55,211 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 8d60f9787865bae9bdb70c9c59e45f35 in 1269ms, sequenceid=240, compaction requested=true 2024-11-17T01:27:55,211 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:55,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
8d60f9787865bae9bdb70c9c59e45f35:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:55,212 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-17T01:27:55,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:55,212 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-17T01:27:55,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:55,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:55,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:55,212 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:55,213 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 70854 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-17T01:27:55,213 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 70854 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-17T01:27:55,213 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/B is initiating minor compaction (all files) 2024-11-17T01:27:55,213 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/A is initiating minor compaction (all files) 2024-11-17T01:27:55,213 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/A in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:55,213 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/B in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:55,213 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2e7bb7143eb946feadc6b655fbf303ee, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8610526c5c3841cebfc9ab853ff05d7b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/448cb045302f498784f7230366bfb5a1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9fc35cefd4fa4c9dbf79b999a59a371e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ee2bf5ae639d4fa9b631e802d060d729, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/d911e841f6584da2a21d87a3cdb01259] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=69.2 K 2024-11-17T01:27:55,213 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/bac73e7c82bb47cea576e47183ccbeb2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/8ab826a9c1dc4d69858c1dbfacc12eae, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/907c3ffc5be14afe8f4e5cdd36f07809, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/70a76224f89b492c9033f9446eaf0ab2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/4042e342c04043db88ffb14eb6056b92, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/fdb6d53f14554cceb46009fb82e8f49d] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=69.2 K 2024-11-17T01:27:55,214 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting bac73e7c82bb47cea576e47183ccbeb2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1731806868750 2024-11-17T01:27:55,214 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2e7bb7143eb946feadc6b655fbf303ee, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1731806868750 2024-11-17T01:27:55,214 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8610526c5c3841cebfc9ab853ff05d7b, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1731806868781 2024-11-17T01:27:55,214 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ab826a9c1dc4d69858c1dbfacc12eae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1731806868781 2024-11-17T01:27:55,214 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 448cb045302f498784f7230366bfb5a1, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1731806869399 2024-11-17T01:27:55,214 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 907c3ffc5be14afe8f4e5cdd36f07809, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1731806869399 2024-11-17T01:27:55,214 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9fc35cefd4fa4c9dbf79b999a59a371e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731806870563 2024-11-17T01:27:55,214 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 70a76224f89b492c9033f9446eaf0ab2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731806870563 2024-11-17T01:27:55,215 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee2bf5ae639d4fa9b631e802d060d729, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731806871684 2024-11-17T01:27:55,215 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4042e342c04043db88ffb14eb6056b92, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731806871684 2024-11-17T01:27:55,215 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting fdb6d53f14554cceb46009fb82e8f49d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1731806872816 2024-11-17T01:27:55,215 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting d911e841f6584da2a21d87a3cdb01259, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1731806872816 2024-11-17T01:27:55,223 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#A#compaction#262 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:55,224 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#B#compaction#261 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:55,224 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/b1578e8e5b1845c1a9f5cea6c44438a5 is 50, key is test_row_0/A:col10/1731806872816/Put/seqid=0 2024-11-17T01:27:55,224 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/627a0f2257f6493281756362280342b7 is 50, key is test_row_0/B:col10/1731806872816/Put/seqid=0 2024-11-17T01:27:55,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742132_1308 (size=12697) 2024-11-17T01:27:55,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742133_1309 (size=12697) 2024-11-17T01:27:55,315 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:55,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-17T01:27:55,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:55,315 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-17T01:27:55,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:55,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:55,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:55,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:55,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:55,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:55,320 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ce2f8d46bc394f018f9be34abbca28a5 is 50, key is test_row_0/A:col10/1731806873946/Put/seqid=0 2024-11-17T01:27:55,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742134_1310 (size=12151) 2024-11-17T01:27:55,631 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/b1578e8e5b1845c1a9f5cea6c44438a5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/b1578e8e5b1845c1a9f5cea6c44438a5 2024-11-17T01:27:55,631 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/627a0f2257f6493281756362280342b7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/627a0f2257f6493281756362280342b7 2024-11-17T01:27:55,639 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/B of 8d60f9787865bae9bdb70c9c59e45f35 into 627a0f2257f6493281756362280342b7(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:55,639 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/A of 8d60f9787865bae9bdb70c9c59e45f35 into b1578e8e5b1845c1a9f5cea6c44438a5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:55,639 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:55,639 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:55,639 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/B, priority=10, startTime=1731806875212; duration=0sec 2024-11-17T01:27:55,639 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/A, priority=10, startTime=1731806875211; duration=0sec 2024-11-17T01:27:55,639 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:55,639 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:B 2024-11-17T01:27:55,639 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:55,639 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:A 2024-11-17T01:27:55,639 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-17T01:27:55,640 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 70854 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-17T01:27:55,641 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/C is initiating minor compaction (all files) 2024-11-17T01:27:55,641 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/C in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:55,641 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0d0f0b85773d40b18c35d67d20004bc7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/9db5b1996f1a4f4e92e83977deeeb0d2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ac65ea82ab924295b133c021f969ff16, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/24f9db720fa1449485a023f08c74ed46, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0b0d1f5e28ce4f7896efdef4bd42234c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ea67b177fae044f4b2b32836bc939799] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=69.2 K 2024-11-17T01:27:55,641 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d0f0b85773d40b18c35d67d20004bc7, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1731806868750 2024-11-17T01:27:55,642 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 9db5b1996f1a4f4e92e83977deeeb0d2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=164, earliestPutTs=1731806868781 2024-11-17T01:27:55,642 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ac65ea82ab924295b133c021f969ff16, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=177, earliestPutTs=1731806869399 2024-11-17T01:27:55,642 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 24f9db720fa1449485a023f08c74ed46, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1731806870563 2024-11-17T01:27:55,642 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b0d1f5e28ce4f7896efdef4bd42234c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731806871684 2024-11-17T01:27:55,643 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ea67b177fae044f4b2b32836bc939799, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1731806872816 2024-11-17T01:27:55,652 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#C#compaction#264 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:55,653 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/ba683b0cad994bba94bca5a10c86c9b1 is 50, key is test_row_0/C:col10/1731806872816/Put/seqid=0 2024-11-17T01:27:55,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-17T01:27:55,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742135_1311 (size=12697) 2024-11-17T01:27:55,723 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ce2f8d46bc394f018f9be34abbca28a5 2024-11-17T01:27:55,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/2eb39378baa04de38be395abbe90af9e is 50, key is test_row_0/B:col10/1731806873946/Put/seqid=0 2024-11-17T01:27:55,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742136_1312 (size=12151) 2024-11-17T01:27:56,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:56,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:56,077 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/ba683b0cad994bba94bca5a10c86c9b1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ba683b0cad994bba94bca5a10c86c9b1 2024-11-17T01:27:56,083 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/C of 8d60f9787865bae9bdb70c9c59e45f35 into ba683b0cad994bba94bca5a10c86c9b1(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:56,083 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:56,083 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/C, priority=10, startTime=1731806875212; duration=0sec 2024-11-17T01:27:56,083 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:56,083 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:C 2024-11-17T01:27:56,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806936092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806936093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,096 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806936094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,097 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806936094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,152 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/2eb39378baa04de38be395abbe90af9e 2024-11-17T01:27:56,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/b7f6f0fca5e44848abecff781cd7869b is 50, key is test_row_0/C:col10/1731806873946/Put/seqid=0 2024-11-17T01:27:56,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742137_1313 (size=12151) 2024-11-17T01:27:56,162 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/b7f6f0fca5e44848abecff781cd7869b 2024-11-17T01:27:56,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ce2f8d46bc394f018f9be34abbca28a5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ce2f8d46bc394f018f9be34abbca28a5 2024-11-17T01:27:56,169 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ce2f8d46bc394f018f9be34abbca28a5, entries=150, sequenceid=249, filesize=11.9 K 2024-11-17T01:27:56,169 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/2eb39378baa04de38be395abbe90af9e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/2eb39378baa04de38be395abbe90af9e 2024-11-17T01:27:56,172 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/2eb39378baa04de38be395abbe90af9e, entries=150, sequenceid=249, filesize=11.9 K 2024-11-17T01:27:56,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/b7f6f0fca5e44848abecff781cd7869b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b7f6f0fca5e44848abecff781cd7869b 2024-11-17T01:27:56,176 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b7f6f0fca5e44848abecff781cd7869b, entries=150, sequenceid=249, filesize=11.9 K 2024-11-17T01:27:56,177 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for 8d60f9787865bae9bdb70c9c59e45f35 in 861ms, sequenceid=249, compaction requested=false 2024-11-17T01:27:56,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:56,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:56,177 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-17T01:27:56,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-17T01:27:56,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-17T01:27:56,179 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6230 sec 2024-11-17T01:27:56,179 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 1.6260 sec 2024-11-17T01:27:56,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:56,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-17T01:27:56,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:56,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:56,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:56,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:56,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:56,199 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:56,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806936199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806936200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/8a5dd36774d64fd59a66ac704abc02e6 is 50, key is test_row_0/A:col10/1731806876093/Put/seqid=0 2024-11-17T01:27:56,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806936201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,204 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806936202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742138_1314 (size=14741) 2024-11-17T01:27:56,207 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/8a5dd36774d64fd59a66ac704abc02e6 2024-11-17T01:27:56,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/a754df3984dc4974893c6f426b9b616d is 50, key is test_row_0/B:col10/1731806876093/Put/seqid=0 2024-11-17T01:27:56,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742139_1315 (size=12301) 2024-11-17T01:27:56,303 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806936302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806936304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806936305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806936305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806936506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806936507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806936507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806936507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/a754df3984dc4974893c6f426b9b616d 2024-11-17T01:27:56,624 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/5793c5caa5f34394bd56522dcd44a497 is 50, key is test_row_0/C:col10/1731806876093/Put/seqid=0 2024-11-17T01:27:56,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742140_1316 (size=12301) 2024-11-17T01:27:56,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-17T01:27:56,658 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-17T01:27:56,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:56,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-17T01:27:56,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-17T01:27:56,660 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:56,660 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:56,661 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:56,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-17T01:27:56,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806936808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806936809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,812 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,812 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-17T01:27:56,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:56,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:56,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:56,812 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:56,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:56,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:56,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806936811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:56,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806936812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-17T01:27:56,963 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:56,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-17T01:27:56,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:56,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:56,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:56,964 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:56,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:56,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:57,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.38 KB at sequenceid=282 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/5793c5caa5f34394bd56522dcd44a497 2024-11-17T01:27:57,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/8a5dd36774d64fd59a66ac704abc02e6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8a5dd36774d64fd59a66ac704abc02e6 2024-11-17T01:27:57,044 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8a5dd36774d64fd59a66ac704abc02e6, entries=200, sequenceid=282, filesize=14.4 K 2024-11-17T01:27:57,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/a754df3984dc4974893c6f426b9b616d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a754df3984dc4974893c6f426b9b616d 2024-11-17T01:27:57,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a754df3984dc4974893c6f426b9b616d, entries=150, sequenceid=282, filesize=12.0 K 2024-11-17T01:27:57,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/5793c5caa5f34394bd56522dcd44a497 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5793c5caa5f34394bd56522dcd44a497 2024-11-17T01:27:57,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5793c5caa5f34394bd56522dcd44a497, entries=150, sequenceid=282, filesize=12.0 K 2024-11-17T01:27:57,052 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~181.14 KB/185490, heapSize ~475.31 KB/486720, currentSize=20.13 KB/20610 for 8d60f9787865bae9bdb70c9c59e45f35 in 854ms, sequenceid=282, compaction requested=true 2024-11-17T01:27:57,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:57,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
8d60f9787865bae9bdb70c9c59e45f35:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:57,053 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:57,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:57,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:57,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:57,053 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:57,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:57,053 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:57,054 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39589 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:57,054 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/A is initiating minor compaction (all files) 2024-11-17T01:27:57,054 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:57,054 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/A in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:57,054 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/B is initiating minor compaction (all files) 2024-11-17T01:27:57,054 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/B in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:57,054 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/b1578e8e5b1845c1a9f5cea6c44438a5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ce2f8d46bc394f018f9be34abbca28a5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8a5dd36774d64fd59a66ac704abc02e6] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=38.7 K 2024-11-17T01:27:57,054 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/627a0f2257f6493281756362280342b7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/2eb39378baa04de38be395abbe90af9e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a754df3984dc4974893c6f426b9b616d] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=36.3 K 2024-11-17T01:27:57,054 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1578e8e5b1845c1a9f5cea6c44438a5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1731806872816 2024-11-17T01:27:57,054 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 627a0f2257f6493281756362280342b7, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1731806872816 2024-11-17T01:27:57,054 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce2f8d46bc394f018f9be34abbca28a5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1731806873946 2024-11-17T01:27:57,055 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 2eb39378baa04de38be395abbe90af9e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1731806873946 2024-11-17T01:27:57,055 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a5dd36774d64fd59a66ac704abc02e6, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1731806876093 2024-11-17T01:27:57,055 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a754df3984dc4974893c6f426b9b616d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1731806876093 2024-11-17T01:27:57,061 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#B#compaction#270 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:57,062 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/3e860ae7dafa4404855b0f6fa8eec03c is 50, key is test_row_0/B:col10/1731806876093/Put/seqid=0 2024-11-17T01:27:57,065 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#A#compaction#271 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:57,065 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ef4064a17ef546dbb62074beea692dd7 is 50, key is test_row_0/A:col10/1731806876093/Put/seqid=0 2024-11-17T01:27:57,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742141_1317 (size=12949) 2024-11-17T01:27:57,072 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/3e860ae7dafa4404855b0f6fa8eec03c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/3e860ae7dafa4404855b0f6fa8eec03c 2024-11-17T01:27:57,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742142_1318 (size=12949) 2024-11-17T01:27:57,076 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/B of 8d60f9787865bae9bdb70c9c59e45f35 into 3e860ae7dafa4404855b0f6fa8eec03c(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:57,076 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:57,076 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/B, priority=13, startTime=1731806877053; duration=0sec 2024-11-17T01:27:57,076 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:57,076 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:B 2024-11-17T01:27:57,077 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:57,077 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:57,077 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/C is initiating minor compaction (all files) 2024-11-17T01:27:57,077 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/C in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:57,078 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ba683b0cad994bba94bca5a10c86c9b1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b7f6f0fca5e44848abecff781cd7869b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5793c5caa5f34394bd56522dcd44a497] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=36.3 K 2024-11-17T01:27:57,078 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ba683b0cad994bba94bca5a10c86c9b1, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=240, earliestPutTs=1731806872816 2024-11-17T01:27:57,078 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting b7f6f0fca5e44848abecff781cd7869b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1731806873946 2024-11-17T01:27:57,079 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 5793c5caa5f34394bd56522dcd44a497, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1731806876093 2024-11-17T01:27:57,085 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
8d60f9787865bae9bdb70c9c59e45f35#C#compaction#272 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:57,085 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/0ddbaa63f19c4bb7966fe7ed2bc1e0d7 is 50, key is test_row_0/C:col10/1731806876093/Put/seqid=0 2024-11-17T01:27:57,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742143_1319 (size=12949) 2024-11-17T01:27:57,116 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-17T01:27:57,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:57,116 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-17T01:27:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:57,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:57,120 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/cbff2d28a6e9450293cb798093c63760 is 50, key is test_row_1/A:col10/1731806876200/Put/seqid=0 2024-11-17T01:27:57,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742144_1320 (size=9857) 2024-11-17T01:27:57,262 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-17T01:27:57,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:57,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:57,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806937337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806937338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,340 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806937338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806937339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806937441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806937441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806937441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806937442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,477 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ef4064a17ef546dbb62074beea692dd7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ef4064a17ef546dbb62074beea692dd7 2024-11-17T01:27:57,482 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/A of 8d60f9787865bae9bdb70c9c59e45f35 into ef4064a17ef546dbb62074beea692dd7(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:57,482 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:57,482 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/A, priority=13, startTime=1731806877053; duration=0sec 2024-11-17T01:27:57,482 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:57,482 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:A 2024-11-17T01:27:57,501 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/0ddbaa63f19c4bb7966fe7ed2bc1e0d7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0ddbaa63f19c4bb7966fe7ed2bc1e0d7 2024-11-17T01:27:57,506 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/C of 8d60f9787865bae9bdb70c9c59e45f35 into 0ddbaa63f19c4bb7966fe7ed2bc1e0d7(size=12.6 K), total size for store is 12.6 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:27:57,506 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:57,506 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/C, priority=13, startTime=1731806877053; duration=0sec 2024-11-17T01:27:57,506 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:57,506 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:C 2024-11-17T01:27:57,527 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/cbff2d28a6e9450293cb798093c63760 2024-11-17T01:27:57,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/51c1d88dedae4b88946af82cd3bd06c2 is 50, key is test_row_1/B:col10/1731806876200/Put/seqid=0 2024-11-17T01:27:57,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742145_1321 (size=9857) 2024-11-17T01:27:57,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806937643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806937644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806937644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806937645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-17T01:27:57,938 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/51c1d88dedae4b88946af82cd3bd06c2 2024-11-17T01:27:57,944 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/883235ce019f4c108593836767a67f61 is 50, key is test_row_1/C:col10/1731806876200/Put/seqid=0 2024-11-17T01:27:57,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742146_1322 (size=9857) 2024-11-17T01:27:57,948 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806937946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806937947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806937947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:57,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:57,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806937947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,348 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/883235ce019f4c108593836767a67f61 2024-11-17T01:27:58,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/cbff2d28a6e9450293cb798093c63760 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/cbff2d28a6e9450293cb798093c63760 2024-11-17T01:27:58,354 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/cbff2d28a6e9450293cb798093c63760, entries=100, sequenceid=289, filesize=9.6 K 2024-11-17T01:27:58,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/51c1d88dedae4b88946af82cd3bd06c2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/51c1d88dedae4b88946af82cd3bd06c2 2024-11-17T01:27:58,358 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/51c1d88dedae4b88946af82cd3bd06c2, entries=100, sequenceid=289, filesize=9.6 K 2024-11-17T01:27:58,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/883235ce019f4c108593836767a67f61 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/883235ce019f4c108593836767a67f61 2024-11-17T01:27:58,362 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/883235ce019f4c108593836767a67f61, entries=100, sequenceid=289, filesize=9.6 K 2024-11-17T01:27:58,363 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=181.14 KB/185490 for 8d60f9787865bae9bdb70c9c59e45f35 in 1247ms, sequenceid=289, compaction requested=false 2024-11-17T01:27:58,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:58,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:58,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-17T01:27:58,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-17T01:27:58,365 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-17T01:27:58,365 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7030 sec 2024-11-17T01:27:58,366 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.7060 sec 2024-11-17T01:27:58,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:58,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-17T01:27:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:58,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:58,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806938455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806938455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,457 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806938455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806938455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ad28bb7741204a44a1cede07d2ce8295 is 50, key is test_row_0/A:col10/1731806878454/Put/seqid=0 2024-11-17T01:27:58,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742147_1323 (size=17181) 2024-11-17T01:27:58,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ad28bb7741204a44a1cede07d2ce8295 2024-11-17T01:27:58,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/df94c5f8727b45b7ae72c387a883ee12 is 50, key is test_row_0/B:col10/1731806878454/Put/seqid=0 2024-11-17T01:27:58,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742148_1324 (size=12301) 2024-11-17T01:27:58,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806938558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806938558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,560 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806938558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806938558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806938761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806938761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806938761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:58,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806938761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-17T01:27:58,763 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-17T01:27:58,764 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:58,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-17T01:27:58,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-17T01:27:58,765 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:58,766 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:58,766 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:58,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-17T01:27:58,877 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=325 (bloomFilter=true), 
to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/df94c5f8727b45b7ae72c387a883ee12 2024-11-17T01:27:58,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/a6cb3dacc776404fad9e2f98cada6503 is 50, key is test_row_0/C:col10/1731806878454/Put/seqid=0 2024-11-17T01:27:58,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742149_1325 (size=12301) 2024-11-17T01:27:58,889 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=69.33 KB at sequenceid=325 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/a6cb3dacc776404fad9e2f98cada6503 2024-11-17T01:27:58,893 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/ad28bb7741204a44a1cede07d2ce8295 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ad28bb7741204a44a1cede07d2ce8295 2024-11-17T01:27:58,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ad28bb7741204a44a1cede07d2ce8295, entries=250, sequenceid=325, filesize=16.8 K 2024-11-17T01:27:58,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/df94c5f8727b45b7ae72c387a883ee12 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df94c5f8727b45b7ae72c387a883ee12 2024-11-17T01:27:58,900 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df94c5f8727b45b7ae72c387a883ee12, entries=150, sequenceid=325, filesize=12.0 K 2024-11-17T01:27:58,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/a6cb3dacc776404fad9e2f98cada6503 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/a6cb3dacc776404fad9e2f98cada6503 2024-11-17T01:27:58,905 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/a6cb3dacc776404fad9e2f98cada6503, entries=150, sequenceid=325, filesize=12.0 K 2024-11-17T01:27:58,906 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3040): Finished flush of dataSize ~207.98 KB/212970, heapSize ~545.63 KB/558720, currentSize=0 B/0 for 8d60f9787865bae9bdb70c9c59e45f35 in 452ms, sequenceid=325, compaction requested=true 2024-11-17T01:27:58,906 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:58,907 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:58,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:27:58,907 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:58,907 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:58,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:27:58,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:58,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8d60f9787865bae9bdb70c9c59e45f35:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:27:58,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:58,908 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:58,908 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/B is initiating minor compaction (all files) 2024-11-17T01:27:58,908 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39987 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:58,908 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/B in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:58,908 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/A is initiating minor compaction (all files) 2024-11-17T01:27:58,908 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/A in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:27:58,908 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/3e860ae7dafa4404855b0f6fa8eec03c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/51c1d88dedae4b88946af82cd3bd06c2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df94c5f8727b45b7ae72c387a883ee12] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=34.3 K 2024-11-17T01:27:58,908 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ef4064a17ef546dbb62074beea692dd7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/cbff2d28a6e9450293cb798093c63760, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ad28bb7741204a44a1cede07d2ce8295] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=39.0 K 2024-11-17T01:27:58,908 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e860ae7dafa4404855b0f6fa8eec03c, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1731806876093 2024-11-17T01:27:58,908 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef4064a17ef546dbb62074beea692dd7, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1731806876093 2024-11-17T01:27:58,909 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting cbff2d28a6e9450293cb798093c63760, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731806876200 2024-11-17T01:27:58,909 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 51c1d88dedae4b88946af82cd3bd06c2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731806876200 2024-11-17T01:27:58,909 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad28bb7741204a44a1cede07d2ce8295, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1731806877336 2024-11-17T01:27:58,909 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting df94c5f8727b45b7ae72c387a883ee12, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1731806877338 2024-11-17T01:27:58,917 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#B#compaction#279 average throughput is 6.55 MB/second, slept 0 time(s) and total 
slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:58,917 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:58,917 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/884dabf49cd544c29b3f4908d42c0bc1 is 50, key is test_row_0/B:col10/1731806878454/Put/seqid=0 2024-11-17T01:27:58,918 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-17T01:27:58,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:58,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:58,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:58,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-17T01:27:58,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-17T01:27:58,920 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#A#compaction#280 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:58,921 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/223200e884704aafa63c1af40d922cb1 is 50, key is test_row_0/A:col10/1731806878454/Put/seqid=0 2024-11-17T01:27:58,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-17T01:27:58,922 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 153 msec 2024-11-17T01:27:58,924 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 159 msec 2024-11-17T01:27:58,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742150_1326 (size=13051) 2024-11-17T01:27:58,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742151_1327 (size=13051) 2024-11-17T01:27:58,938 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/884dabf49cd544c29b3f4908d42c0bc1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/884dabf49cd544c29b3f4908d42c0bc1 2024-11-17T01:27:58,940 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/223200e884704aafa63c1af40d922cb1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/223200e884704aafa63c1af40d922cb1 2024-11-17T01:27:58,945 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/B of 8d60f9787865bae9bdb70c9c59e45f35 into 884dabf49cd544c29b3f4908d42c0bc1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:58,945 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:58,945 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/B, priority=13, startTime=1731806878907; duration=0sec 2024-11-17T01:27:58,946 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:27:58,946 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:B 2024-11-17T01:27:58,946 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:27:58,947 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 35107 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:27:58,947 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 8d60f9787865bae9bdb70c9c59e45f35/C is initiating minor compaction (all files) 2024-11-17T01:27:58,947 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 8d60f9787865bae9bdb70c9c59e45f35/C in TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:58,947 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0ddbaa63f19c4bb7966fe7ed2bc1e0d7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/883235ce019f4c108593836767a67f61, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/a6cb3dacc776404fad9e2f98cada6503] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp, totalSize=34.3 K 2024-11-17T01:27:58,948 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/A of 8d60f9787865bae9bdb70c9c59e45f35 into 223200e884704aafa63c1af40d922cb1(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:58,948 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ddbaa63f19c4bb7966fe7ed2bc1e0d7, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=282, earliestPutTs=1731806876093 2024-11-17T01:27:58,948 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:58,948 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/A, priority=13, startTime=1731806878906; duration=0sec 2024-11-17T01:27:58,948 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:58,948 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:A 2024-11-17T01:27:58,948 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 883235ce019f4c108593836767a67f61, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731806876200 2024-11-17T01:27:58,949 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a6cb3dacc776404fad9e2f98cada6503, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=325, earliestPutTs=1731806877338 2024-11-17T01:27:58,956 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8d60f9787865bae9bdb70c9c59e45f35#C#compaction#281 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:27:58,956 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/0c3f7731af0349e8a33859d2ae27c31f is 50, key is test_row_0/C:col10/1731806878454/Put/seqid=0 2024-11-17T01:27:58,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742152_1328 (size=13051) 2024-11-17T01:27:58,969 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/0c3f7731af0349e8a33859d2ae27c31f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0c3f7731af0349e8a33859d2ae27c31f 2024-11-17T01:27:58,974 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 8d60f9787865bae9bdb70c9c59e45f35/C of 8d60f9787865bae9bdb70c9c59e45f35 into 0c3f7731af0349e8a33859d2ae27c31f(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:27:58,974 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:27:58,975 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35., storeName=8d60f9787865bae9bdb70c9c59e45f35/C, priority=13, startTime=1731806878908; duration=0sec 2024-11-17T01:27:58,975 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:27:58,975 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8d60f9787865bae9bdb70c9c59e45f35:C 2024-11-17T01:27:59,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-17T01:27:59,067 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-17T01:27:59,068 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:27:59,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-17T01:27:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:27:59,070 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-17T01:27:59,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-17T01:27:59,070 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:27:59,071 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:27:59,071 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:27:59,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:27:59,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:59,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:27:59,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:59,071 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:27:59,071 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:27:59,075 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/e18d88486f5046da9e7378c72e8f19da is 50, key is test_row_0/A:col10/1731806879067/Put/seqid=0 2024-11-17T01:27:59,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742153_1329 (size=12301) 2024-11-17T01:27:59,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806939089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806939091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806939091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806939091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-17T01:27:59,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806939193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806939194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,195 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806939194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806939194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,222 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-17T01:27:59,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:59,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,223 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
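The RegionTooBusyException warnings above are the region server refusing new writes while the region's memstore is over its blocking limit and a flush is still in progress; writers are expected to back off and retry once the memstore drains. The stock HBase client already retries this internally, but as a hypothetical, self-contained illustration of the pattern (row/column names taken from the log, retry policy invented for the example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPut {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                long backoffMs = 100;
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);
                        break;                      // write accepted
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(backoffMs);    // region is draining its memstore
                        backoffMs *= 2;             // simple exponential backoff
                    }
                }
            }
        }
    }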
2024-11-17T01:27:59,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-17T01:27:59,375 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,375 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-17T01:27:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,376 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,376 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806939395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806939396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806939397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,399 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806939398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/e18d88486f5046da9e7378c72e8f19da 2024-11-17T01:27:59,487 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/a33fe72b3e3542178e64d828820eb438 is 50, key is test_row_0/B:col10/1731806879067/Put/seqid=0 2024-11-17T01:27:59,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742154_1330 (size=12301) 2024-11-17T01:27:59,527 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,528 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-17T01:27:59,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:59,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
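Procedure pid=91/92 above is a master-driven table flush; each attempt to run the FlushRegionCallable on the region server fails with "Unable to complete flush ... as already flushing" because the server's own MemStoreFlusher is still writing the same region, and the master keeps re-dispatching the callable until it succeeds. For reference, such a flush is normally requested through the Admin API, roughly as sketched below (illustrative only; the test drives this through its own harness):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; this is what
                // produces the FlushTableProcedure / FlushRegionProcedure pair above.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }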
2024-11-17T01:27:59,528 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-17T01:27:59,680 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,680 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-17T01:27:59,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:59,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,680 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806939698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806939700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806939701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:27:59,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806939702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,832 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,832 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-17T01:27:59,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:59,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,833 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:27:59,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,891 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/a33fe72b3e3542178e64d828820eb438 2024-11-17T01:27:59,897 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/6eef99897d55413e80dd26f6112bfd3e is 50, key is test_row_0/C:col10/1731806879067/Put/seqid=0 2024-11-17T01:27:59,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742155_1331 (size=12301) 2024-11-17T01:27:59,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T01:27:59,984 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:27:59,985 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-17T01:27:59,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:27:59,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:27:59,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:27:59,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:00,137 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:00,137 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-17T01:28:00,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:28:00,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:28:00,137 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:28:00,138 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:00,138 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:00,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:00,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-17T01:28:00,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46424 deadline: 1731806940203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:00,204 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46400 deadline: 1731806940203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:00,204 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:00,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46472 deadline: 1731806940203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:00,205 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:00,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:46440 deadline: 1731806940204, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:00,289 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:00,290 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-17T01:28:00,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:28:00,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. as already flushing 2024-11-17T01:28:00,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:28:00,290 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:00,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:00,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:00,300 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=342 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/6eef99897d55413e80dd26f6112bfd3e 2024-11-17T01:28:00,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/e18d88486f5046da9e7378c72e8f19da as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e18d88486f5046da9e7378c72e8f19da 2024-11-17T01:28:00,307 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e18d88486f5046da9e7378c72e8f19da, entries=150, sequenceid=342, filesize=12.0 K 2024-11-17T01:28:00,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/a33fe72b3e3542178e64d828820eb438 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a33fe72b3e3542178e64d828820eb438 2024-11-17T01:28:00,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a33fe72b3e3542178e64d828820eb438, entries=150, sequenceid=342, filesize=12.0 K 2024-11-17T01:28:00,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/6eef99897d55413e80dd26f6112bfd3e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6eef99897d55413e80dd26f6112bfd3e 2024-11-17T01:28:00,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6eef99897d55413e80dd26f6112bfd3e, entries=150, sequenceid=342, filesize=12.0 K 2024-11-17T01:28:00,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 8d60f9787865bae9bdb70c9c59e45f35 in 1252ms, sequenceid=342, compaction requested=false 2024-11-17T01:28:00,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:28:00,442 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:00,442 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-17T01:28:00,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:28:00,442 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-17T01:28:00,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:28:00,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:00,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:28:00,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:00,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:28:00,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:00,446 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/4b1dbb94f3144dca9e4d6c08fa57d48f is 50, key is test_row_0/A:col10/1731806879085/Put/seqid=0 2024-11-17T01:28:00,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742156_1332 (size=12301) 2024-11-17T01:28:00,709 DEBUG [Thread-1207 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d68f787 to 127.0.0.1:63898 2024-11-17T01:28:00,709 DEBUG [Thread-1203 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0eb04aeb to 127.0.0.1:63898 2024-11-17T01:28:00,709 DEBUG [Thread-1207 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:00,709 DEBUG [Thread-1203 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
2024-11-17T01:28:00,710 DEBUG [Thread-1201 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2fef31f8 to 127.0.0.1:63898 2024-11-17T01:28:00,710 DEBUG [Thread-1201 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:00,711 DEBUG [Thread-1209 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x10e6bf6a to 127.0.0.1:63898 2024-11-17T01:28:00,711 DEBUG [Thread-1209 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:00,711 DEBUG [Thread-1205 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6a0e9c8f to 127.0.0.1:63898 2024-11-17T01:28:00,711 DEBUG [Thread-1205 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:00,855 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/4b1dbb94f3144dca9e4d6c08fa57d48f 2024-11-17T01:28:00,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/f0c8e930874d4dfd86642f044f1f4de4 is 50, key is test_row_0/B:col10/1731806879085/Put/seqid=0 2024-11-17T01:28:00,873 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742157_1333 (size=12301) 2024-11-17T01:28:01,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-17T01:28:01,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:28:01,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
as already flushing 2024-11-17T01:28:01,213 DEBUG [Thread-1192 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6862e3ce to 127.0.0.1:63898 2024-11-17T01:28:01,213 DEBUG [Thread-1192 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:01,214 DEBUG [Thread-1194 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d296fed to 127.0.0.1:63898 2024-11-17T01:28:01,214 DEBUG [Thread-1190 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a569490 to 127.0.0.1:63898 2024-11-17T01:28:01,214 DEBUG [Thread-1194 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:01,214 DEBUG [Thread-1190 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:01,216 DEBUG [Thread-1198 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x43f04e0e to 127.0.0.1:63898 2024-11-17T01:28:01,216 DEBUG [Thread-1198 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:01,275 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/f0c8e930874d4dfd86642f044f1f4de4 2024-11-17T01:28:01,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/7517c23535e84be2aa5555326d28afcb is 50, key is test_row_0/C:col10/1731806879085/Put/seqid=0 2024-11-17T01:28:01,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742158_1334 (size=12301) 2024-11-17T01:28:01,696 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=365 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/7517c23535e84be2aa5555326d28afcb 2024-11-17T01:28:01,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/4b1dbb94f3144dca9e4d6c08fa57d48f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/4b1dbb94f3144dca9e4d6c08fa57d48f 2024-11-17T01:28:01,711 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/4b1dbb94f3144dca9e4d6c08fa57d48f, entries=150, sequenceid=365, filesize=12.0 K 2024-11-17T01:28:01,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/f0c8e930874d4dfd86642f044f1f4de4 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f0c8e930874d4dfd86642f044f1f4de4 2024-11-17T01:28:01,718 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f0c8e930874d4dfd86642f044f1f4de4, entries=150, sequenceid=365, filesize=12.0 K 2024-11-17T01:28:01,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/7517c23535e84be2aa5555326d28afcb as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/7517c23535e84be2aa5555326d28afcb 2024-11-17T01:28:01,723 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/7517c23535e84be2aa5555326d28afcb, entries=150, sequenceid=365, filesize=12.0 K 2024-11-17T01:28:01,724 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=26.84 KB/27480 for 8d60f9787865bae9bdb70c9c59e45f35 in 1282ms, sequenceid=365, compaction requested=true 2024-11-17T01:28:01,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:28:01,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:28:01,724 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-17T01:28:01,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-17T01:28:01,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-17T01:28:01,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6540 sec 2024-11-17T01:28:01,727 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 2.6590 sec 2024-11-17T01:28:03,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-17T01:28:03,178 INFO [Thread-1200 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-17T01:28:05,036 DEBUG [Thread-1196 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x08d0caa5 to 127.0.0.1:63898 2024-11-17T01:28:05,036 DEBUG [Thread-1196 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 67 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 64 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6780 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7207 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6437 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6777 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 7214 2024-11-17T01:28:05,037 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-17T01:28:05,037 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-17T01:28:05,037 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53bfce45 to 127.0.0.1:63898 2024-11-17T01:28:05,038 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:05,039 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-17T01:28:05,040 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-17T01:28:05,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, 
state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:05,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-17T01:28:05,044 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806885044"}]},"ts":"1731806885044"} 2024-11-17T01:28:05,046 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-17T01:28:05,066 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-17T01:28:05,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-17T01:28:05,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8d60f9787865bae9bdb70c9c59e45f35, UNASSIGN}] 2024-11-17T01:28:05,070 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=8d60f9787865bae9bdb70c9c59e45f35, UNASSIGN 2024-11-17T01:28:05,071 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=8d60f9787865bae9bdb70c9c59e45f35, regionState=CLOSING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:05,072 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-17T01:28:05,072 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:28:05,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-17T01:28:05,224 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:05,226 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:28:05,226 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-17T01:28:05,226 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 8d60f9787865bae9bdb70c9c59e45f35, disabling compactions & flushes 2024-11-17T01:28:05,226 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
2024-11-17T01:28:05,226 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:28:05,227 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. after waiting 0 ms 2024-11-17T01:28:05,227 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 2024-11-17T01:28:05,227 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing 8d60f9787865bae9bdb70c9c59e45f35 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-17T01:28:05,227 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=A 2024-11-17T01:28:05,228 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:05,228 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=B 2024-11-17T01:28:05,228 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:05,228 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 8d60f9787865bae9bdb70c9c59e45f35, store=C 2024-11-17T01:28:05,228 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:05,235 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/9de2f883c20b47b98f83259b845ff9f8 is 50, key is test_row_0/A:col10/1731806881210/Put/seqid=0 2024-11-17T01:28:05,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742159_1335 (size=12301) 2024-11-17T01:28:05,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-17T01:28:05,642 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/9de2f883c20b47b98f83259b845ff9f8 2024-11-17T01:28:05,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-17T01:28:05,655 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/a02c8677ecea43d9ab2a78e12cd3c839 is 50, key is test_row_0/B:col10/1731806881210/Put/seqid=0 2024-11-17T01:28:05,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742160_1336 (size=12301) 2024-11-17T01:28:06,060 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/a02c8677ecea43d9ab2a78e12cd3c839 2024-11-17T01:28:06,074 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/d91f7d6a47914048a74fb8f23e423b43 is 50, key is test_row_0/C:col10/1731806881210/Put/seqid=0 2024-11-17T01:28:06,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742161_1337 (size=12301) 2024-11-17T01:28:06,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-17T01:28:06,480 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=373 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/d91f7d6a47914048a74fb8f23e423b43 2024-11-17T01:28:06,492 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/A/9de2f883c20b47b98f83259b845ff9f8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9de2f883c20b47b98f83259b845ff9f8 2024-11-17T01:28:06,497 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9de2f883c20b47b98f83259b845ff9f8, entries=150, sequenceid=373, filesize=12.0 K 2024-11-17T01:28:06,498 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/B/a02c8677ecea43d9ab2a78e12cd3c839 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a02c8677ecea43d9ab2a78e12cd3c839 2024-11-17T01:28:06,503 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a02c8677ecea43d9ab2a78e12cd3c839, entries=150, sequenceid=373, filesize=12.0 K 2024-11-17T01:28:06,503 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/.tmp/C/d91f7d6a47914048a74fb8f23e423b43 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/d91f7d6a47914048a74fb8f23e423b43 2024-11-17T01:28:06,509 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/d91f7d6a47914048a74fb8f23e423b43, entries=150, sequenceid=373, filesize=12.0 K 2024-11-17T01:28:06,510 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 8d60f9787865bae9bdb70c9c59e45f35 in 1282ms, sequenceid=373, compaction requested=true 2024-11-17T01:28:06,510 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/281f6e5fd4b849a985685cd3254689c1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/3bbe8998f2e74cf98e940f9c1025b1b0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/a0efb2fe6fa7474c94ace387042ea164, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2fc6684c8f9c4d768605cea6449db4ad, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/f9f24e90e4b04dfdb2c78d23b6799123, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e564ede70c56485d8498387b54b10e25, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0f2f55d97e7642e9867857e1523420c5, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0e9a272c93d74c1090dee17aab7807c0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/30f9b6d9388d4c269248e3a3253b4ead, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2e7bb7143eb946feadc6b655fbf303ee, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/841059ffc47e4c4abe38cb5e766a9d2f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8610526c5c3841cebfc9ab853ff05d7b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/448cb045302f498784f7230366bfb5a1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9fc35cefd4fa4c9dbf79b999a59a371e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ee2bf5ae639d4fa9b631e802d060d729, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/b1578e8e5b1845c1a9f5cea6c44438a5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/d911e841f6584da2a21d87a3cdb01259, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ce2f8d46bc394f018f9be34abbca28a5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8a5dd36774d64fd59a66ac704abc02e6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ef4064a17ef546dbb62074beea692dd7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/cbff2d28a6e9450293cb798093c63760, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ad28bb7741204a44a1cede07d2ce8295] to archive 2024-11-17T01:28:06,511 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
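Each StoreCloser entry that follows moves one compacted store file out of the region's data directory into the matching location under archive/, preserving the namespace/table/region/family layout (HFileArchiver does this server-side). The helper below is purely illustrative and not an HBase API: it only reproduces the path mapping visible in these log lines, using the root directory and one file name taken from the log.

// Hypothetical helper illustrating the mapping seen in the log:
//   <root>/data/<ns>/<table>/<region>/<family>/<file>
//     -> <root>/archive/data/<ns>/<table>/<region>/<family>/<file>
// HBase's HFileArchiver performs the actual move; this is only a sketch of the observed layout.
import org.apache.hadoop.fs.Path;

public final class ArchivePathExample {
  static Path toArchivePath(Path rootDir, Path storeFile) {
    // Relative part after the root dir, e.g. data/default/TestAcidGuarantees/<region>/A/<file>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(rootDir, "archive/" + relative);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50");
    Path file = new Path(root,
        "data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/281f6e5fd4b849a985685cd3254689c1");
    // Prints .../archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/281f6e5fd4b8...
    System.out.println(toArchivePath(root, file));
  }
}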
2024-11-17T01:28:06,513 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/281f6e5fd4b849a985685cd3254689c1 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/281f6e5fd4b849a985685cd3254689c1 2024-11-17T01:28:06,514 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/3bbe8998f2e74cf98e940f9c1025b1b0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/3bbe8998f2e74cf98e940f9c1025b1b0 2024-11-17T01:28:06,515 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/a0efb2fe6fa7474c94ace387042ea164 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/a0efb2fe6fa7474c94ace387042ea164 2024-11-17T01:28:06,516 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2fc6684c8f9c4d768605cea6449db4ad to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2fc6684c8f9c4d768605cea6449db4ad 2024-11-17T01:28:06,517 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/f9f24e90e4b04dfdb2c78d23b6799123 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/f9f24e90e4b04dfdb2c78d23b6799123 2024-11-17T01:28:06,518 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e564ede70c56485d8498387b54b10e25 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e564ede70c56485d8498387b54b10e25 2024-11-17T01:28:06,519 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0f2f55d97e7642e9867857e1523420c5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0f2f55d97e7642e9867857e1523420c5 2024-11-17T01:28:06,519 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0e9a272c93d74c1090dee17aab7807c0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/0e9a272c93d74c1090dee17aab7807c0 2024-11-17T01:28:06,520 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/30f9b6d9388d4c269248e3a3253b4ead to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/30f9b6d9388d4c269248e3a3253b4ead 2024-11-17T01:28:06,521 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2e7bb7143eb946feadc6b655fbf303ee to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/2e7bb7143eb946feadc6b655fbf303ee 2024-11-17T01:28:06,522 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/841059ffc47e4c4abe38cb5e766a9d2f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/841059ffc47e4c4abe38cb5e766a9d2f 2024-11-17T01:28:06,523 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8610526c5c3841cebfc9ab853ff05d7b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8610526c5c3841cebfc9ab853ff05d7b 2024-11-17T01:28:06,524 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/448cb045302f498784f7230366bfb5a1 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/448cb045302f498784f7230366bfb5a1 2024-11-17T01:28:06,525 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9fc35cefd4fa4c9dbf79b999a59a371e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9fc35cefd4fa4c9dbf79b999a59a371e 2024-11-17T01:28:06,526 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ee2bf5ae639d4fa9b631e802d060d729 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ee2bf5ae639d4fa9b631e802d060d729 2024-11-17T01:28:06,526 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/b1578e8e5b1845c1a9f5cea6c44438a5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/b1578e8e5b1845c1a9f5cea6c44438a5 2024-11-17T01:28:06,527 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/d911e841f6584da2a21d87a3cdb01259 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/d911e841f6584da2a21d87a3cdb01259 2024-11-17T01:28:06,528 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ce2f8d46bc394f018f9be34abbca28a5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ce2f8d46bc394f018f9be34abbca28a5 2024-11-17T01:28:06,529 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8a5dd36774d64fd59a66ac704abc02e6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/8a5dd36774d64fd59a66ac704abc02e6 2024-11-17T01:28:06,530 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ef4064a17ef546dbb62074beea692dd7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ef4064a17ef546dbb62074beea692dd7 2024-11-17T01:28:06,531 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/cbff2d28a6e9450293cb798093c63760 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/cbff2d28a6e9450293cb798093c63760 2024-11-17T01:28:06,532 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ad28bb7741204a44a1cede07d2ce8295 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/ad28bb7741204a44a1cede07d2ce8295 2024-11-17T01:28:06,533 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c81835745daa4d839ad7947450c0579c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f131c4d669b344f8b0405745a113fba6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/ce3155069aab490f87f0bf340ef1987f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0be95ad1bba244bc8e28f463b7aee387, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/73e44cbc129a4c9f84ae8c5a6c32da7c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0c934a2c905b4431b50131b2f45ede7d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c33215e2da6b469ba04f278ceaf468a9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/b7edb6f5ebd44e5e8b63e1d59828352c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/d0cbd119e50f4b3aaf7ef0b0e54906ed, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/bac73e7c82bb47cea576e47183ccbeb2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df818be45e5e4c7cb5888618175994a7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/8ab826a9c1dc4d69858c1dbfacc12eae, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/907c3ffc5be14afe8f4e5cdd36f07809, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/70a76224f89b492c9033f9446eaf0ab2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/4042e342c04043db88ffb14eb6056b92, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/627a0f2257f6493281756362280342b7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/fdb6d53f14554cceb46009fb82e8f49d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/2eb39378baa04de38be395abbe90af9e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/3e860ae7dafa4404855b0f6fa8eec03c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a754df3984dc4974893c6f426b9b616d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/51c1d88dedae4b88946af82cd3bd06c2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df94c5f8727b45b7ae72c387a883ee12] to archive 2024-11-17T01:28:06,533 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:28:06,535 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c81835745daa4d839ad7947450c0579c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c81835745daa4d839ad7947450c0579c 2024-11-17T01:28:06,536 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f131c4d669b344f8b0405745a113fba6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f131c4d669b344f8b0405745a113fba6 2024-11-17T01:28:06,537 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/ce3155069aab490f87f0bf340ef1987f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/ce3155069aab490f87f0bf340ef1987f 2024-11-17T01:28:06,538 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0be95ad1bba244bc8e28f463b7aee387 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0be95ad1bba244bc8e28f463b7aee387 2024-11-17T01:28:06,539 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/73e44cbc129a4c9f84ae8c5a6c32da7c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/73e44cbc129a4c9f84ae8c5a6c32da7c 2024-11-17T01:28:06,540 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0c934a2c905b4431b50131b2f45ede7d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/0c934a2c905b4431b50131b2f45ede7d 2024-11-17T01:28:06,541 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c33215e2da6b469ba04f278ceaf468a9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/c33215e2da6b469ba04f278ceaf468a9 2024-11-17T01:28:06,542 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/b7edb6f5ebd44e5e8b63e1d59828352c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/b7edb6f5ebd44e5e8b63e1d59828352c 2024-11-17T01:28:06,542 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/d0cbd119e50f4b3aaf7ef0b0e54906ed to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/d0cbd119e50f4b3aaf7ef0b0e54906ed 2024-11-17T01:28:06,543 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/bac73e7c82bb47cea576e47183ccbeb2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/bac73e7c82bb47cea576e47183ccbeb2 2024-11-17T01:28:06,544 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df818be45e5e4c7cb5888618175994a7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df818be45e5e4c7cb5888618175994a7 2024-11-17T01:28:06,545 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/8ab826a9c1dc4d69858c1dbfacc12eae to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/8ab826a9c1dc4d69858c1dbfacc12eae 2024-11-17T01:28:06,546 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/907c3ffc5be14afe8f4e5cdd36f07809 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/907c3ffc5be14afe8f4e5cdd36f07809 2024-11-17T01:28:06,546 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/70a76224f89b492c9033f9446eaf0ab2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/70a76224f89b492c9033f9446eaf0ab2 2024-11-17T01:28:06,547 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/4042e342c04043db88ffb14eb6056b92 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/4042e342c04043db88ffb14eb6056b92 2024-11-17T01:28:06,548 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/627a0f2257f6493281756362280342b7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/627a0f2257f6493281756362280342b7 2024-11-17T01:28:06,549 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/fdb6d53f14554cceb46009fb82e8f49d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/fdb6d53f14554cceb46009fb82e8f49d 2024-11-17T01:28:06,550 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/2eb39378baa04de38be395abbe90af9e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/2eb39378baa04de38be395abbe90af9e 2024-11-17T01:28:06,550 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/3e860ae7dafa4404855b0f6fa8eec03c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/3e860ae7dafa4404855b0f6fa8eec03c 2024-11-17T01:28:06,551 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a754df3984dc4974893c6f426b9b616d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a754df3984dc4974893c6f426b9b616d 2024-11-17T01:28:06,552 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/51c1d88dedae4b88946af82cd3bd06c2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/51c1d88dedae4b88946af82cd3bd06c2 2024-11-17T01:28:06,553 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df94c5f8727b45b7ae72c387a883ee12 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/df94c5f8727b45b7ae72c387a883ee12 2024-11-17T01:28:06,554 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/60d53496adc041fba3ac4266c20a4f8f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6d046b90d15f4af59b6549dcd7006129, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/4069a118158241e980b93a84857ff005, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b4cd48cddd0843ba9bf85316fcdb4779, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/c46844b63595425c80404f8b868ab4d6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/64c7f80b001c4258ba51a2420b4edc94, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/91c401eafb1d4a7bb5075e6d133529be, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/8c495418a15743fb871a92446fc9a62d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/f3e9b7d4229e426f847a3c8f984e1c69, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0d0f0b85773d40b18c35d67d20004bc7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5f6d27f5ea1146e9b1674cbc26b63d26, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/9db5b1996f1a4f4e92e83977deeeb0d2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ac65ea82ab924295b133c021f969ff16, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/24f9db720fa1449485a023f08c74ed46, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0b0d1f5e28ce4f7896efdef4bd42234c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ba683b0cad994bba94bca5a10c86c9b1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ea67b177fae044f4b2b32836bc939799, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b7f6f0fca5e44848abecff781cd7869b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0ddbaa63f19c4bb7966fe7ed2bc1e0d7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5793c5caa5f34394bd56522dcd44a497, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/883235ce019f4c108593836767a67f61, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/a6cb3dacc776404fad9e2f98cada6503] to archive 2024-11-17T01:28:06,555 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:28:06,556 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/60d53496adc041fba3ac4266c20a4f8f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/60d53496adc041fba3ac4266c20a4f8f 2024-11-17T01:28:06,557 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6d046b90d15f4af59b6549dcd7006129 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6d046b90d15f4af59b6549dcd7006129 2024-11-17T01:28:06,558 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/4069a118158241e980b93a84857ff005 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/4069a118158241e980b93a84857ff005 2024-11-17T01:28:06,559 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b4cd48cddd0843ba9bf85316fcdb4779 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b4cd48cddd0843ba9bf85316fcdb4779 2024-11-17T01:28:06,559 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/c46844b63595425c80404f8b868ab4d6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/c46844b63595425c80404f8b868ab4d6 2024-11-17T01:28:06,560 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/64c7f80b001c4258ba51a2420b4edc94 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/64c7f80b001c4258ba51a2420b4edc94 2024-11-17T01:28:06,561 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/91c401eafb1d4a7bb5075e6d133529be to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/91c401eafb1d4a7bb5075e6d133529be 2024-11-17T01:28:06,562 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/8c495418a15743fb871a92446fc9a62d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/8c495418a15743fb871a92446fc9a62d 2024-11-17T01:28:06,563 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/f3e9b7d4229e426f847a3c8f984e1c69 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/f3e9b7d4229e426f847a3c8f984e1c69 2024-11-17T01:28:06,563 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0d0f0b85773d40b18c35d67d20004bc7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0d0f0b85773d40b18c35d67d20004bc7 2024-11-17T01:28:06,564 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5f6d27f5ea1146e9b1674cbc26b63d26 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5f6d27f5ea1146e9b1674cbc26b63d26 2024-11-17T01:28:06,565 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/9db5b1996f1a4f4e92e83977deeeb0d2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/9db5b1996f1a4f4e92e83977deeeb0d2 2024-11-17T01:28:06,566 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ac65ea82ab924295b133c021f969ff16 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ac65ea82ab924295b133c021f969ff16 2024-11-17T01:28:06,567 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/24f9db720fa1449485a023f08c74ed46 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/24f9db720fa1449485a023f08c74ed46 2024-11-17T01:28:06,567 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0b0d1f5e28ce4f7896efdef4bd42234c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0b0d1f5e28ce4f7896efdef4bd42234c 2024-11-17T01:28:06,568 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ba683b0cad994bba94bca5a10c86c9b1 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ba683b0cad994bba94bca5a10c86c9b1 2024-11-17T01:28:06,569 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ea67b177fae044f4b2b32836bc939799 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/ea67b177fae044f4b2b32836bc939799 2024-11-17T01:28:06,570 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b7f6f0fca5e44848abecff781cd7869b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/b7f6f0fca5e44848abecff781cd7869b 2024-11-17T01:28:06,570 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0ddbaa63f19c4bb7966fe7ed2bc1e0d7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0ddbaa63f19c4bb7966fe7ed2bc1e0d7 2024-11-17T01:28:06,571 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5793c5caa5f34394bd56522dcd44a497 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/5793c5caa5f34394bd56522dcd44a497 2024-11-17T01:28:06,572 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/883235ce019f4c108593836767a67f61 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/883235ce019f4c108593836767a67f61 2024-11-17T01:28:06,573 DEBUG [StoreCloser-TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/a6cb3dacc776404fad9e2f98cada6503 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/a6cb3dacc776404fad9e2f98cada6503 2024-11-17T01:28:06,577 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/recovered.edits/376.seqid, newMaxSeqId=376, maxSeqId=1 2024-11-17T01:28:06,577 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35. 
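A note on the repeated StoreCloser entries above: on region close, HFileArchiver moves every store file from the region's data directory to the same relative location under the cluster archive directory (data/... becomes archive/data/...) rather than deleting it outright. A minimal sketch of that path mapping, using the Hadoop Path API (illustrative only; this is not the actual org.apache.hadoop.hbase.backup.HFileArchiver implementation, and the root directory is copied from the log):

import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  // Rewrites <root>/data/<ns>/<table>/<region>/<cf>/<hfile>
  // to       <root>/archive/data/<ns>/<table>/<region>/<cf>/<hfile>,
  // mirroring the "Archived from FileableStoreFile, <src> to <dst>" lines above.
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  public static void main(String[] args) {
    Path root = new Path("hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50");
    Path src = new Path(root,
        "data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/91c401eafb1d4a7bb5075e6d133529be");
    System.out.println(toArchivePath(root, src)); // ends with /archive/data/default/TestAcidGuarantees/...
  }
}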
2024-11-17T01:28:06,577 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 8d60f9787865bae9bdb70c9c59e45f35: 2024-11-17T01:28:06,578 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:28:06,579 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=8d60f9787865bae9bdb70c9c59e45f35, regionState=CLOSED 2024-11-17T01:28:06,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-17T01:28:06,581 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 8d60f9787865bae9bdb70c9c59e45f35, server=04f7e7347dc7,37721,1731806791503 in 1.5080 sec 2024-11-17T01:28:06,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-17T01:28:06,582 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=8d60f9787865bae9bdb70c9c59e45f35, UNASSIGN in 1.5120 sec 2024-11-17T01:28:06,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-17T01:28:06,583 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5150 sec 2024-11-17T01:28:06,584 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806886584"}]},"ts":"1731806886584"} 2024-11-17T01:28:06,585 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-17T01:28:06,625 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-17T01:28:06,627 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5860 sec 2024-11-17T01:28:07,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-17T01:28:07,154 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-17T01:28:07,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-17T01:28:07,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:07,160 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:07,161 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:07,162 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-17T01:28:07,165 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:28:07,167 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/recovered.edits] 2024-11-17T01:28:07,171 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/223200e884704aafa63c1af40d922cb1 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/223200e884704aafa63c1af40d922cb1 2024-11-17T01:28:07,172 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/4b1dbb94f3144dca9e4d6c08fa57d48f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/4b1dbb94f3144dca9e4d6c08fa57d48f 2024-11-17T01:28:07,173 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9de2f883c20b47b98f83259b845ff9f8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/9de2f883c20b47b98f83259b845ff9f8 2024-11-17T01:28:07,175 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e18d88486f5046da9e7378c72e8f19da to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/A/e18d88486f5046da9e7378c72e8f19da 2024-11-17T01:28:07,178 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/884dabf49cd544c29b3f4908d42c0bc1 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/884dabf49cd544c29b3f4908d42c0bc1 
2024-11-17T01:28:07,179 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a02c8677ecea43d9ab2a78e12cd3c839 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a02c8677ecea43d9ab2a78e12cd3c839 2024-11-17T01:28:07,181 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a33fe72b3e3542178e64d828820eb438 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/a33fe72b3e3542178e64d828820eb438 2024-11-17T01:28:07,183 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f0c8e930874d4dfd86642f044f1f4de4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/B/f0c8e930874d4dfd86642f044f1f4de4 2024-11-17T01:28:07,186 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0c3f7731af0349e8a33859d2ae27c31f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/0c3f7731af0349e8a33859d2ae27c31f 2024-11-17T01:28:07,187 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6eef99897d55413e80dd26f6112bfd3e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/6eef99897d55413e80dd26f6112bfd3e 2024-11-17T01:28:07,189 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/7517c23535e84be2aa5555326d28afcb to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/7517c23535e84be2aa5555326d28afcb 2024-11-17T01:28:07,190 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/d91f7d6a47914048a74fb8f23e423b43 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/C/d91f7d6a47914048a74fb8f23e423b43 2024-11-17T01:28:07,194 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/recovered.edits/376.seqid to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35/recovered.edits/376.seqid 2024-11-17T01:28:07,195 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/8d60f9787865bae9bdb70c9c59e45f35 2024-11-17T01:28:07,195 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-17T01:28:07,197 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:07,205 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-17T01:28:07,207 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-17T01:28:07,209 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:07,209 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-17T01:28:07,209 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731806887209"}]},"ts":"9223372036854775807"} 2024-11-17T01:28:07,211 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-17T01:28:07,211 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 8d60f9787865bae9bdb70c9c59e45f35, NAME => 'TestAcidGuarantees,,1731806858404.8d60f9787865bae9bdb70c9c59e45f35.', STARTKEY => '', ENDKEY => ''}] 2024-11-17T01:28:07,211 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-17T01:28:07,211 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731806887211"}]},"ts":"9223372036854775807"} 2024-11-17T01:28:07,213 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-17T01:28:07,225 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:07,227 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 70 msec 2024-11-17T01:28:07,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-17T01:28:07,264 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-17T01:28:07,278 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=238 (was 241), OpenFileDescriptor=451 (was 463), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=303 (was 313), ProcessCount=11 (was 11), AvailableMemoryMB=3879 (was 3915) 2024-11-17T01:28:07,285 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=238, OpenFileDescriptor=451, MaxFileDescriptor=1048576, SystemLoadAverage=303, ProcessCount=11, AvailableMemoryMB=3878 2024-11-17T01:28:07,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
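Up to this point the log has traced the full teardown of the first TestAcidGuarantees table: DisableTableProcedure (pid=93) finishes, then DeleteTableProcedure (pid=97) archives the region directories, removes the region and table-state rows from hbase:meta, and the client reports the DELETE operation for procId 97 as completed. The client-side Admin calls that drive such a sequence look roughly like the sketch below (an illustration assuming a plain HBase 2.x client, not code taken from the actual test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(tn)) {
        admin.disableTable(tn); // corresponds to the DisableTableProcedure (pid=93) above
      }
      admin.deleteTable(tn);    // corresponds to the DeleteTableProcedure (pid=97) above
    }
  }
}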
2024-11-17T01:28:07,287 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:28:07,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:07,289 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T01:28:07,289 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:07,289 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-11-17T01:28:07,290 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T01:28:07,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-17T01:28:07,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742162_1338 (size=960) 2024-11-17T01:28:07,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-17T01:28:07,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-17T01:28:07,702 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 2024-11-17T01:28:07,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742163_1339 (size=53) 2024-11-17T01:28:07,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-17T01:28:08,115 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:28:08,115 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9218423c208ee1c122ec7fafbd087d74, disabling compactions & flushes 2024-11-17T01:28:08,115 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:08,115 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:08,115 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. after waiting 0 ms 2024-11-17T01:28:08,115 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:08,116 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
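The CREATE request logged at 01:28:07,287 (pid=98) rebuilds TestAcidGuarantees with three column families A, B and C, one version per cell, the BASIC compacting-memstore table attribute, and a deliberately small MEMSTORE_FLUSHSIZE of 131072 bytes, which is what trips the TableDescriptorChecker warning. A rough client-side equivalent, sketched with the standard HBase 2.x descriptor builders (values copied from the logged descriptor; this is not the test's actual table-creation code):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  static void createTestTable(Admin admin) throws java.io.IOException {
    TableDescriptorBuilder table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}
        .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
        // 128 KB flush size: small enough to trigger the MEMSTORE_FLUSHSIZE warning above
        .setMemStoreFlushSize(131072L);
    for (String family : new String[] {"A", "B", "C"}) {
      table.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)   // VERSIONS => '1'
          .setBlocksize(65536) // BLOCKSIZE => '65536'
          .build());
    }
    admin.createTable(table.build()); // kicks off a CreateTableProcedure on the master (pid=98 above)
  }
}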
2024-11-17T01:28:08,116 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:08,118 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T01:28:08,119 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731806888118"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731806888118"}]},"ts":"1731806888118"} 2024-11-17T01:28:08,121 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-17T01:28:08,122 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T01:28:08,123 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806888122"}]},"ts":"1731806888122"} 2024-11-17T01:28:08,124 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-17T01:28:08,175 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, ASSIGN}] 2024-11-17T01:28:08,177 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, ASSIGN 2024-11-17T01:28:08,178 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, ASSIGN; state=OFFLINE, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=false 2024-11-17T01:28:08,329 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=9218423c208ee1c122ec7fafbd087d74, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:08,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure 9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:28:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-17T01:28:08,485 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:08,491 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:08,491 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:28:08,492 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:08,492 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:28:08,492 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:08,492 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:08,496 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:08,498 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:28:08,498 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9218423c208ee1c122ec7fafbd087d74 columnFamilyName A 2024-11-17T01:28:08,498 DEBUG [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:08,499 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(327): Store=9218423c208ee1c122ec7fafbd087d74/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:28:08,499 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:08,500 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:28:08,501 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9218423c208ee1c122ec7fafbd087d74 columnFamilyName B 2024-11-17T01:28:08,501 DEBUG [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:08,501 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(327): Store=9218423c208ee1c122ec7fafbd087d74/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:28:08,501 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:08,503 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:28:08,503 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9218423c208ee1c122ec7fafbd087d74 columnFamilyName C 2024-11-17T01:28:08,503 DEBUG [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:08,504 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(327): Store=9218423c208ee1c122ec7fafbd087d74/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:28:08,504 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:08,505 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:08,505 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:08,506 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T01:28:08,507 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:08,509 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T01:28:08,510 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened 9218423c208ee1c122ec7fafbd087d74; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69327624, jitterRate=0.033062100410461426}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:28:08,510 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:08,511 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., pid=100, masterSystemTime=1731806888485 2024-11-17T01:28:08,512 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:08,512 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
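One detail worth decoding from the open sequence just above: ConstantSizeRegionSplitPolicy reports desiredMaxFileSize=69327624 with jitterRate=0.0330621, which is consistent with a 64 MB base split size for this test, since 67108864 * (1 + 0.0330621) ≈ 69327624. The policy jitters each region's split threshold around the configured maximum so that regions do not all split at the same moment; the reopen later in the log shows the same base with a negative jitter (67108864 * (1 - 0.0514317) ≈ 63657338). The 64 MB base is inferred from this arithmetic, not stated explicitly in the log.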
2024-11-17T01:28:08,512 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=9218423c208ee1c122ec7fafbd087d74, regionState=OPEN, openSeqNum=2, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:08,514 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-17T01:28:08,514 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure 9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 in 181 msec 2024-11-17T01:28:08,515 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-17T01:28:08,515 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, ASSIGN in 339 msec 2024-11-17T01:28:08,516 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T01:28:08,516 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806888516"}]},"ts":"1731806888516"} 2024-11-17T01:28:08,517 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-17T01:28:08,559 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T01:28:08,560 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2720 sec 2024-11-17T01:28:09,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-17T01:28:09,401 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-11-17T01:28:09,405 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34becda3 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f7f772a 2024-11-17T01:28:09,452 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b976e1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:09,455 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:09,456 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43012, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:09,458 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T01:28:09,459 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54484, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T01:28:09,462 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-17T01:28:09,462 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:28:09,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:09,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742164_1340 (size=996) 2024-11-17T01:28:09,876 DEBUG [PEWorker-5 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-17T01:28:09,876 INFO [PEWorker-5 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-17T01:28:09,881 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-17T01:28:09,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, REOPEN/MOVE}] 2024-11-17T01:28:09,886 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, REOPEN/MOVE 2024-11-17T01:28:09,887 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=9218423c208ee1c122ec7fafbd087d74, regionState=CLOSING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:09,888 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-17T01:28:09,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure 9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:28:10,040 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,041 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,041 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-17T01:28:10,041 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing 9218423c208ee1c122ec7fafbd087d74, disabling compactions & flushes 2024-11-17T01:28:10,042 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,042 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,042 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. after waiting 0 ms 2024-11-17T01:28:10,042 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:10,079 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-17T01:28:10,080 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,080 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:10,081 WARN [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: 9218423c208ee1c122ec7fafbd087d74 to self. 2024-11-17T01:28:10,083 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,084 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=9218423c208ee1c122ec7fafbd087d74, regionState=CLOSED 2024-11-17T01:28:10,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-11-17T01:28:10,089 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure 9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 in 198 msec 2024-11-17T01:28:10,090 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, REOPEN/MOVE; state=CLOSED, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=true 2024-11-17T01:28:10,240 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=9218423c208ee1c122ec7fafbd087d74, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,241 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure 9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:28:10,392 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,398 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:10,398 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:28:10,399 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,399 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:28:10,399 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,399 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,402 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,403 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:28:10,403 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9218423c208ee1c122ec7fafbd087d74 columnFamilyName A 2024-11-17T01:28:10,405 DEBUG [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:10,406 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(327): Store=9218423c208ee1c122ec7fafbd087d74/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:28:10,407 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,407 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:28:10,408 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9218423c208ee1c122ec7fafbd087d74 columnFamilyName B 2024-11-17T01:28:10,408 DEBUG [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:10,408 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(327): Store=9218423c208ee1c122ec7fafbd087d74/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:28:10,408 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,409 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:28:10,409 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9218423c208ee1c122ec7fafbd087d74 columnFamilyName C 2024-11-17T01:28:10,409 DEBUG [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:10,410 INFO [StoreOpener-9218423c208ee1c122ec7fafbd087d74-1 {}] regionserver.HStore(327): Store=9218423c208ee1c122ec7fafbd087d74/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:28:10,410 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,410 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,412 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,413 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T01:28:10,415 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,416 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened 9218423c208ee1c122ec7fafbd087d74; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63657338, jitterRate=-0.051431745290756226}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:28:10,416 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:10,417 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., pid=105, masterSystemTime=1731806890392 2024-11-17T01:28:10,419 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,419 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
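[Editor's note] The open-region records above show region 9218423c208ee1c122ec7fafbd087d74 coming up with three column families (A, B, C), each backed by a CompactingMemStore with the BASIC in-memory compactor, plus a MOB flush path, FlushNonSloppyStoresFirstPolicy and ConstantSizeRegionSplitPolicy. As a hedged illustration only (the exact descriptor used by the test is not in this log; the MOB threshold, which families are MOB-enabled, and the class name CreateAcidTableSketch are assumptions), a comparable table could be declared with the HBase 2.x client API roughly like this:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Policies named in the open-region records above.
                .setFlushPolicyClassName(
                    "org.apache.hadoop.hbase.regionserver.FlushNonSloppyStoresFirstPolicy")
                .setRegionSplitPolicyClassName(
                    "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy");
        for (String cf : new String[] {"A", "B", "C"}) {
          ColumnFamilyDescriptorBuilder family =
              ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
                  // CompactingMemStore with compactor=BASIC, as logged per store.
                  .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                  // MOB settings are assumptions; the excerpt only shows a MOB flush under family A.
                  .setMobEnabled(true)
                  .setMobThreshold(100);
          table.setColumnFamily(family.build());
        }
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(table.build());
        }
      }
    }

With such a descriptor, each store is created exactly as the HStore/CompactingMemStore lines above report: per-family cache config, a 2.00 MB in-memory flush threshold, and the default ExploringCompactionPolicy parameters.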
2024-11-17T01:28:10,419 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=9218423c208ee1c122ec7fafbd087d74, regionState=OPEN, openSeqNum=5, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-11-17T01:28:10,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure 9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 in 179 msec 2024-11-17T01:28:10,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-17T01:28:10,422 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, REOPEN/MOVE in 536 msec 2024-11-17T01:28:10,423 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-17T01:28:10,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 541 msec 2024-11-17T01:28:10,425 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 961 msec 2024-11-17T01:28:10,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-17T01:28:10,427 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5fe71801 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@bf5e2f0 2024-11-17T01:28:10,450 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b82ba2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,451 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x51f7d511 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@75b14fbd 2024-11-17T01:28:10,458 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7b6cf8cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,459 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1dc42ea6 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62f74604 2024-11-17T01:28:10,467 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ec15031, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,468 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x117e86d9 
to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@49e13594 2024-11-17T01:28:10,475 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3dd5b441, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,476 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6cd96549 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c54a0d3 2024-11-17T01:28:10,483 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c336ea4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,485 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3875c8c5 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f94d721 2024-11-17T01:28:10,492 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aee939b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,492 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0801ba40 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@319559be 2024-11-17T01:28:10,500 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f49665c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,501 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x27539bdc to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c907e21 2024-11-17T01:28:10,508 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@683f8469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,509 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e3203d9 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@61ec0f48 2024-11-17T01:28:10,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75e4d3d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,517 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x798e7fd4 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7819b9e2 2024-11-17T01:28:10,525 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b308f62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:10,528 DEBUG [hconnection-0x5411e509-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,528 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:10,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-17T01:28:10,529 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43016, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,529 DEBUG [hconnection-0x4573f4b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-17T01:28:10,529 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:10,529 DEBUG [hconnection-0x7ecf4618-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,529 DEBUG [hconnection-0x75885f97-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,530 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:10,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:10,530 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43026, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,530 DEBUG [hconnection-0x4edb326c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,530 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43036, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,530 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43034, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,531 DEBUG [hconnection-0x1747789-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,531 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43050, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,531 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43052, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,531 DEBUG [hconnection-0x1ca273be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,534 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43056, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,534 DEBUG [hconnection-0x2d79ea1f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,535 DEBUG [hconnection-0x45b283ed-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,535 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43068, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,536 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43074, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,536 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:28:10,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:10,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:10,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:10,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:10,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:10,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:10,539 DEBUG [hconnection-0x9f3985b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:10,543 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43088, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:10,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806950552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806950552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806950555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806950559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806950560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,577 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411170f79334c7cde46ceba2595f3d6eaab67_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806890536/Put/seqid=0 2024-11-17T01:28:10,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742165_1341 (size=12154) 2024-11-17T01:28:10,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-17T01:28:10,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806950661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,667 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806950661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806950666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,668 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806950667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,670 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806950667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,682 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-17T01:28:10,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:10,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,683 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:10,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:10,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:10,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-17T01:28:10,835 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,835 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-17T01:28:10,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:10,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,835 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:10,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:10,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:10,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806950869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806950869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806950870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806950872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:10,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806950872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,981 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:10,983 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411170f79334c7cde46ceba2595f3d6eaab67_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411170f79334c7cde46ceba2595f3d6eaab67_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:10,984 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/dd5a55039b4f46fa8f98dce4b2ba8734, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:10,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/dd5a55039b4f46fa8f98dce4b2ba8734 is 175, key is test_row_0/A:col10/1731806890536/Put/seqid=0 2024-11-17T01:28:10,987 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:10,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-17T01:28:10,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:10,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:10,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:10,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:10,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742166_1342 (size=30955) 2024-11-17T01:28:10,988 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/dd5a55039b4f46fa8f98dce4b2ba8734 2024-11-17T01:28:10,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:11,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/5c584a97011f421ea41cffed829f967b is 50, key is test_row_0/B:col10/1731806890536/Put/seqid=0 2024-11-17T01:28:11,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742167_1343 (size=12001) 2024-11-17T01:28:11,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-17T01:28:11,139 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,140 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-17T01:28:11,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:11,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:11,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:11,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:11,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:11,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:11,178 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806951175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806951175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,178 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806951175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,179 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806951176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,181 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806951178, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,292 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-17T01:28:11,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:11,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:11,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:11,292 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:11,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:11,409 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/5c584a97011f421ea41cffed829f967b 2024-11-17T01:28:11,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/9fa7d4147e834641869580f58176ed9c is 50, key is test_row_0/C:col10/1731806890536/Put/seqid=0 2024-11-17T01:28:11,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742168_1344 (size=12001) 2024-11-17T01:28:11,435 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/9fa7d4147e834641869580f58176ed9c 2024-11-17T01:28:11,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/dd5a55039b4f46fa8f98dce4b2ba8734 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd5a55039b4f46fa8f98dce4b2ba8734 2024-11-17T01:28:11,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd5a55039b4f46fa8f98dce4b2ba8734, entries=150, sequenceid=18, filesize=30.2 K 2024-11-17T01:28:11,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/5c584a97011f421ea41cffed829f967b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/5c584a97011f421ea41cffed829f967b 2024-11-17T01:28:11,444 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-17T01:28:11,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:11,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:11,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:11,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:11,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:11,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/5c584a97011f421ea41cffed829f967b, entries=150, sequenceid=18, filesize=11.7 K 2024-11-17T01:28:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:11,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/9fa7d4147e834641869580f58176ed9c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/9fa7d4147e834641869580f58176ed9c 2024-11-17T01:28:11,449 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/9fa7d4147e834641869580f58176ed9c, entries=150, sequenceid=18, filesize=11.7 K 2024-11-17T01:28:11,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 9218423c208ee1c122ec7fafbd087d74 in 914ms, sequenceid=18, compaction requested=false 2024-11-17T01:28:11,450 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-17T01:28:11,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:11,597 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-17T01:28:11,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:11,597 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-17T01:28:11,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:11,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:11,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:11,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:11,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:11,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:11,603 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117c4b555dd89584ecca8eaaad7fcbc495f_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806890551/Put/seqid=0 2024-11-17T01:28:11,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742169_1345 (size=12154) 2024-11-17T01:28:11,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-17T01:28:11,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:11,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:11,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806951692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806951692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806951693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806951698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,703 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806951698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806951799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806951799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806951799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806951803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:11,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806951803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,003 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806952001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806952002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,004 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806952002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:12,011 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117c4b555dd89584ecca8eaaad7fcbc495f_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117c4b555dd89584ecca8eaaad7fcbc495f_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:12,011 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806952007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806952008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/07c16f3fa6bf4c679b7c23c2e3a2b815, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:12,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/07c16f3fa6bf4c679b7c23c2e3a2b815 is 175, key is test_row_0/A:col10/1731806890551/Put/seqid=0 2024-11-17T01:28:12,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742170_1346 (size=30955) 2024-11-17T01:28:12,019 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/07c16f3fa6bf4c679b7c23c2e3a2b815 2024-11-17T01:28:12,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/b00a8bc604214b32ba1040a2745438f6 is 50, key is test_row_0/B:col10/1731806890551/Put/seqid=0 2024-11-17T01:28:12,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742171_1347 (size=12001) 2024-11-17T01:28:12,109 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-17T01:28:12,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806952305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806952305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806952305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806952313, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806952315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,432 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/b00a8bc604214b32ba1040a2745438f6 2024-11-17T01:28:12,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/f81806a617134dc0a67391e9bf989a96 is 50, key is test_row_0/C:col10/1731806890551/Put/seqid=0 2024-11-17T01:28:12,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742172_1348 (size=12001) 2024-11-17T01:28:12,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-17T01:28:12,811 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806952810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,812 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806952810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806952812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806952818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,821 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:12,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806952819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:12,841 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/f81806a617134dc0a67391e9bf989a96 2024-11-17T01:28:12,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/07c16f3fa6bf4c679b7c23c2e3a2b815 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/07c16f3fa6bf4c679b7c23c2e3a2b815 2024-11-17T01:28:12,848 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/07c16f3fa6bf4c679b7c23c2e3a2b815, entries=150, sequenceid=40, filesize=30.2 K 2024-11-17T01:28:12,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/b00a8bc604214b32ba1040a2745438f6 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/b00a8bc604214b32ba1040a2745438f6 2024-11-17T01:28:12,851 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/b00a8bc604214b32ba1040a2745438f6, entries=150, sequenceid=40, filesize=11.7 K 2024-11-17T01:28:12,853 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/f81806a617134dc0a67391e9bf989a96 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f81806a617134dc0a67391e9bf989a96 2024-11-17T01:28:12,858 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f81806a617134dc0a67391e9bf989a96, entries=150, sequenceid=40, filesize=11.7 K 2024-11-17T01:28:12,859 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 9218423c208ee1c122ec7fafbd087d74 in 1262ms, sequenceid=40, compaction requested=false 2024-11-17T01:28:12,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:12,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:12,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-17T01:28:12,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-17T01:28:12,861 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-17T01:28:12,861 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3300 sec 2024-11-17T01:28:12,862 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.3330 sec 2024-11-17T01:28:13,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:13,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-17T01:28:13,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:13,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:13,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:13,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:13,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:13,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:13,823 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117ea8de26a076b4ef6a3f051fb485384bd_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806891684/Put/seqid=0 2024-11-17T01:28:13,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742173_1349 (size=14594) 2024-11-17T01:28:13,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806953840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:13,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806953845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:13,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806953846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:13,852 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806953847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:13,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806953848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:13,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806953949, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:13,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806953953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:13,958 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806953953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:13,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806953953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:13,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:13,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806953955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,156 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806954154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,162 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806954159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806954159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806954160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806954160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,227 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:14,229 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117ea8de26a076b4ef6a3f051fb485384bd_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117ea8de26a076b4ef6a3f051fb485384bd_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:14,230 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/d246f3109e9947b08a219e5e93ed9c40, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:14,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/d246f3109e9947b08a219e5e93ed9c40 is 175, key is test_row_0/A:col10/1731806891684/Put/seqid=0 2024-11-17T01:28:14,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742174_1350 (size=39549) 2024-11-17T01:28:14,462 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806954459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806954464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806954465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806954465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,469 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806954466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-17T01:28:14,633 INFO [Thread-1553 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-17T01:28:14,634 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:14,634 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=56, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/d246f3109e9947b08a219e5e93ed9c40 2024-11-17T01:28:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-17T01:28:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-17T01:28:14,635 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:14,636 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:14,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:14,641 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/79605b47d2de4057a1bbbf19ba2cfb7a is 50, key is test_row_0/B:col10/1731806891684/Put/seqid=0 2024-11-17T01:28:14,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742175_1351 (size=12001) 
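The burst of RegionTooBusyException entries above records client writes being rejected because region 9218423c208ee1c122ec7fafbd087d74 is over its 512.0 K memstore blocking limit while the flush started by MemStoreFlusher.0 is still writing out the store files. A minimal, hypothetical Java sketch of the client side of that situation follows; the configuration values and the explicit back-off loop are illustrative only (the stock HBase client already retries RegionTooBusyException on its own) and are not taken from this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only: the blocking limit reported in the log (512.0 K)
    // is the memstore flush size multiplied by the block multiplier.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Back off and retry while the region is blocking writes for its in-flight flush.
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}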
2024-11-17T01:28:14,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-17T01:28:14,787 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-17T01:28:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:14,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:14,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:14,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-17T01:28:14,939 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-17T01:28:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
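The pid=108/109 entries trace a table flush requested through the admin API (the HMaster line above shows Client=jenkins flushing TestAcidGuarantees) arriving while region 9218423c208ee1c122ec7fafbd087d74 is already flushing, so FlushRegionCallable keeps failing with "Unable to complete flush" and the master re-dispatches the FlushRegionProcedure until the in-flight flush completes. A minimal sketch of how such a flush request is typically issued from a client follows; it is an illustration under that assumption, not the code used by this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // As in the log above: the master stores a FlushTableProcedure and
      // dispatches a FlushRegionProcedure to the hosting region server.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}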
2024-11-17T01:28:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:14,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:14,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:14,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806954965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806954970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806954970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806954971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:14,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:14,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806954974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:15,044 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/79605b47d2de4057a1bbbf19ba2cfb7a 2024-11-17T01:28:15,051 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/cf4881058de142e8a83bfd42a86979d0 is 50, key is test_row_0/C:col10/1731806891684/Put/seqid=0 2024-11-17T01:28:15,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742176_1352 (size=12001) 2024-11-17T01:28:15,092 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:15,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-17T01:28:15,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
as already flushing 2024-11-17T01:28:15,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:15,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:15,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-17T01:28:15,245 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:15,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-17T01:28:15,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:15,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:15,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:15,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:15,397 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:15,398 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-17T01:28:15,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:15,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,398 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:15,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:15,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:15,455 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/cf4881058de142e8a83bfd42a86979d0 2024-11-17T01:28:15,486 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/d246f3109e9947b08a219e5e93ed9c40 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/d246f3109e9947b08a219e5e93ed9c40 2024-11-17T01:28:15,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/d246f3109e9947b08a219e5e93ed9c40, entries=200, sequenceid=56, filesize=38.6 K 2024-11-17T01:28:15,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/79605b47d2de4057a1bbbf19ba2cfb7a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/79605b47d2de4057a1bbbf19ba2cfb7a 2024-11-17T01:28:15,493 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/79605b47d2de4057a1bbbf19ba2cfb7a, entries=150, sequenceid=56, 
filesize=11.7 K 2024-11-17T01:28:15,494 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/cf4881058de142e8a83bfd42a86979d0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cf4881058de142e8a83bfd42a86979d0 2024-11-17T01:28:15,496 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cf4881058de142e8a83bfd42a86979d0, entries=150, sequenceid=56, filesize=11.7 K 2024-11-17T01:28:15,497 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 9218423c208ee1c122ec7fafbd087d74 in 1680ms, sequenceid=56, compaction requested=true 2024-11-17T01:28:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:15,497 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:15,497 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:15,497 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:15,498 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:15,498 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:15,498 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 
9218423c208ee1c122ec7fafbd087d74/B is initiating minor compaction (all files) 2024-11-17T01:28:15,498 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/A is initiating minor compaction (all files) 2024-11-17T01:28:15,498 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/A in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,498 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/B in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,498 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/5c584a97011f421ea41cffed829f967b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/b00a8bc604214b32ba1040a2745438f6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/79605b47d2de4057a1bbbf19ba2cfb7a] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=35.2 K 2024-11-17T01:28:15,498 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd5a55039b4f46fa8f98dce4b2ba8734, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/07c16f3fa6bf4c679b7c23c2e3a2b815, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/d246f3109e9947b08a219e5e93ed9c40] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=99.1 K 2024-11-17T01:28:15,498 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,499 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd5a55039b4f46fa8f98dce4b2ba8734, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/07c16f3fa6bf4c679b7c23c2e3a2b815, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/d246f3109e9947b08a219e5e93ed9c40] 2024-11-17T01:28:15,499 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c584a97011f421ea41cffed829f967b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1731806890535 2024-11-17T01:28:15,499 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd5a55039b4f46fa8f98dce4b2ba8734, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1731806890535 2024-11-17T01:28:15,499 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting b00a8bc604214b32ba1040a2745438f6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731806890550 2024-11-17T01:28:15,499 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07c16f3fa6bf4c679b7c23c2e3a2b815, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731806890550 2024-11-17T01:28:15,499 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 79605b47d2de4057a1bbbf19ba2cfb7a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1731806891684 2024-11-17T01:28:15,499 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting d246f3109e9947b08a219e5e93ed9c40, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1731806891684 2024-11-17T01:28:15,506 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#B#compaction#300 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:15,507 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/d140cdb488e241b2b6d2a83b39368396 is 50, key is test_row_0/B:col10/1731806891684/Put/seqid=0 2024-11-17T01:28:15,507 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:15,509 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241117298476a0e5d04469a7f0bb2a3bae2d26_9218423c208ee1c122ec7fafbd087d74 store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:15,512 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241117298476a0e5d04469a7f0bb2a3bae2d26_9218423c208ee1c122ec7fafbd087d74, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:15,512 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117298476a0e5d04469a7f0bb2a3bae2d26_9218423c208ee1c122ec7fafbd087d74 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:15,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742177_1353 (size=12104) 2024-11-17T01:28:15,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742178_1354 (size=4469) 2024-11-17T01:28:15,550 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:15,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-17T01:28:15,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:15,550 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-17T01:28:15,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:15,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:15,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:15,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:15,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:15,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:15,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111790388353b2bb4c05b6a6e5ecceeec023_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806893845/Put/seqid=0 2024-11-17T01:28:15,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742179_1355 (size=12154) 2024-11-17T01:28:15,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-17T01:28:15,922 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/d140cdb488e241b2b6d2a83b39368396 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d140cdb488e241b2b6d2a83b39368396 2024-11-17T01:28:15,926 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/B of 9218423c208ee1c122ec7fafbd087d74 into d140cdb488e241b2b6d2a83b39368396(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:15,926 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:15,926 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/B, priority=13, startTime=1731806895497; duration=0sec 2024-11-17T01:28:15,926 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:15,926 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:B 2024-11-17T01:28:15,926 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:15,927 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:15,927 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/C is initiating minor compaction (all files) 2024-11-17T01:28:15,927 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/C in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:15,927 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/9fa7d4147e834641869580f58176ed9c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f81806a617134dc0a67391e9bf989a96, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cf4881058de142e8a83bfd42a86979d0] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=35.2 K 2024-11-17T01:28:15,927 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#A#compaction#301 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:15,928 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 9fa7d4147e834641869580f58176ed9c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1731806890535 2024-11-17T01:28:15,928 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/4ddb0269630e4c04806b99ba83354312 is 175, key is test_row_0/A:col10/1731806891684/Put/seqid=0 2024-11-17T01:28:15,928 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f81806a617134dc0a67391e9bf989a96, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731806890550 2024-11-17T01:28:15,928 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting cf4881058de142e8a83bfd42a86979d0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1731806891684 2024-11-17T01:28:15,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742180_1356 (size=31058) 2024-11-17T01:28:15,935 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#C#compaction#303 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:15,935 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/4ddb0269630e4c04806b99ba83354312 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4ddb0269630e4c04806b99ba83354312 2024-11-17T01:28:15,936 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/f7bc8c49f67f44caa32e38ba31956fde is 50, key is test_row_0/C:col10/1731806891684/Put/seqid=0 2024-11-17T01:28:15,940 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/A of 9218423c208ee1c122ec7fafbd087d74 into 4ddb0269630e4c04806b99ba83354312(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:15,940 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:15,940 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/A, priority=13, startTime=1731806895497; duration=0sec 2024-11-17T01:28:15,940 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:15,940 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:A 2024-11-17T01:28:15,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742181_1357 (size=12104) 2024-11-17T01:28:15,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:15,965 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111790388353b2bb4c05b6a6e5ecceeec023_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111790388353b2bb4c05b6a6e5ecceeec023_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:15,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/c0097d2eca8c43a0b6247ff8f73b0b11, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:15,966 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/c0097d2eca8c43a0b6247ff8f73b0b11 is 175, key is test_row_0/A:col10/1731806893845/Put/seqid=0 2024-11-17T01:28:15,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742182_1358 (size=30955) 2024-11-17T01:28:15,970 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=76, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/c0097d2eca8c43a0b6247ff8f73b0b11 2024-11-17T01:28:15,977 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:15,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/14f90a04c42e435b98bec5813e939dfe is 50, key is test_row_0/B:col10/1731806893845/Put/seqid=0 2024-11-17T01:28:15,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:15,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742183_1359 (size=12001) 2024-11-17T01:28:16,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806955992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,008 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806955997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806955998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806955999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,009 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806955999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806956102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806956109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,112 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806956110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806956110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,116 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806956110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806956305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806956314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,318 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806956314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806956316, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,319 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:28:16,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806956317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:28:16,352 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/f7bc8c49f67f44caa32e38ba31956fde as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f7bc8c49f67f44caa32e38ba31956fde
2024-11-17T01:28:16,356 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/C of 9218423c208ee1c122ec7fafbd087d74 into f7bc8c49f67f44caa32e38ba31956fde(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
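The repeated RegionTooBusyException entries in this section come from HRegion.checkResources rejecting Mutate calls once the region's memstore exceeds its blocking size, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The log only reports the resulting limit (512.0 K); one plausible reading, assumed here for illustration rather than confirmed by the log, is a 128 KB flush size with the default multiplier of 4. A minimal Java sketch of that arithmetic, with both values assumed:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Assumed test values: a 128 KB flush size with the default multiplier of 4
        // yields the 512 KB blocking limit reported above ("Over memstore limit=512.0 K").
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 128 * 1024 * 1024L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Writes to a region are rejected with RegionTooBusyException once its memstore
        // grows past this value and until a flush brings it back under the limit.
        System.out.println("blocking memstore size = " + blockingLimit + " bytes"); // 524288
    }
}

The surrounding flush records for sequenceid=76 show that drain happening, with the memstore written out to HFiles under data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74.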
2024-11-17T01:28:16,356 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74:
2024-11-17T01:28:16,356 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/C, priority=13, startTime=1731806895497; duration=0sec
2024-11-17T01:28:16,356 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-17T01:28:16,356 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:C
2024-11-17T01:28:16,388 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/14f90a04c42e435b98bec5813e939dfe
2024-11-17T01:28:16,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/872fd558a9f641079d42a5c5b3503d2b is 50, key is test_row_0/C:col10/1731806893845/Put/seqid=0
2024-11-17T01:28:16,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742184_1360 (size=12001)
2024-11-17T01:28:16,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806956608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806956619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806956619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:16,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806956620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:16,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:28:16,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806956620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:28:16,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108
2024-11-17T01:28:16,798 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/872fd558a9f641079d42a5c5b3503d2b
2024-11-17T01:28:16,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/c0097d2eca8c43a0b6247ff8f73b0b11 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c0097d2eca8c43a0b6247ff8f73b0b11
2024-11-17T01:28:16,805 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c0097d2eca8c43a0b6247ff8f73b0b11, entries=150, sequenceid=76, filesize=30.2 K
2024-11-17T01:28:16,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/14f90a04c42e435b98bec5813e939dfe as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/14f90a04c42e435b98bec5813e939dfe
2024-11-17T01:28:16,809 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/14f90a04c42e435b98bec5813e939dfe, entries=150, sequenceid=76, filesize=11.7 K
2024-11-17T01:28:16,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/872fd558a9f641079d42a5c5b3503d2b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/872fd558a9f641079d42a5c5b3503d2b
2024-11-17T01:28:16,812 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/872fd558a9f641079d42a5c5b3503d2b, entries=150, sequenceid=76, filesize=11.7 K
2024-11-17T01:28:16,813 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 9218423c208ee1c122ec7fafbd087d74 in 1263ms, sequenceid=76, compaction requested=false
2024-11-17T01:28:16,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74:
2024-11-17T01:28:16,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.
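Seen from the client side, the Mutate calls being rejected above are puts against rows like test_row_0 in families A/B/C, and they keep bouncing until a flush such as the one that just finished (sequenceid=76) drains the memstore. A hedged sketch of a hypothetical client loop, not taken from TestAcidGuarantees itself, that backs off on this condition and requests a flush through the Admin API; the table name, row, family, and qualifier are the ones visible in the log, while the value and the retry policy are assumptions:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tableName);
             Admin admin = conn.getAdmin()) {
            // Row, family and qualifier taken from the log (test_row_0, family A, col10);
            // the value is a placeholder.
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break; // accepted once the memstore is back under the blocking limit
                } catch (IOException busy) {
                    // RegionTooBusyException ("Over memstore limit") is the retriable cause seen
                    // in this log; depending on hbase.client.retries.number it may surface
                    // directly or wrapped after the client's own retries are exhausted.
                    admin.flush(tableName);       // ask for the flush the MemStoreFlusher does on its own here
                    Thread.sleep(200L * attempt); // simple backoff; the policy is an assumption
                }
            }
        }
    }
}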
2024-11-17T01:28:16,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-17T01:28:16,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-17T01:28:16,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-17T01:28:16,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1780 sec 2024-11-17T01:28:16,816 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.1810 sec 2024-11-17T01:28:17,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:17,119 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-17T01:28:17,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:17,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:17,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:17,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:17,119 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:17,120 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:17,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411171c5460d601c84115b3c4d8ab39b83d4d_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806895988/Put/seqid=0 2024-11-17T01:28:17,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742185_1361 (size=14594) 2024-11-17T01:28:17,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806957148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806957149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806957154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806957155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806957155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806957258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806957258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806957262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806957262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806957262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806957465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,468 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806957465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,472 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806957466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806957467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,472 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:28:17,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806957467, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:28:17,530 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-17T01:28:17,533 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411171c5460d601c84115b3c4d8ab39b83d4d_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411171c5460d601c84115b3c4d8ab39b83d4d_9218423c208ee1c122ec7fafbd087d74
2024-11-17T01:28:17,534 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/742140faded541bebbcccb7091ce62ab, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74]
2024-11-17T01:28:17,534 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/742140faded541bebbcccb7091ce62ab is 175, key is test_row_0/A:col10/1731806895988/Put/seqid=0
2024-11-17T01:28:17,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742186_1362 (size=39549)
2024-11-17T01:28:17,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806957770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806957770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806957773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806957774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,779 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:17,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806957775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:17,945 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=96, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/742140faded541bebbcccb7091ce62ab 2024-11-17T01:28:17,952 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/7e4fe7b75e5b4fc3a98bf703ba4e5782 is 50, key is test_row_0/B:col10/1731806895988/Put/seqid=0 2024-11-17T01:28:17,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742187_1363 (size=12001) 2024-11-17T01:28:18,282 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:18,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:18,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806958278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:18,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806958277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:18,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:18,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806958280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:18,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:18,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806958283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:18,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:18,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806958284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:18,355 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/7e4fe7b75e5b4fc3a98bf703ba4e5782 2024-11-17T01:28:18,362 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/ea979d4fec4140c597c9c418cc30fcd7 is 50, key is test_row_0/C:col10/1731806895988/Put/seqid=0 2024-11-17T01:28:18,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742188_1364 (size=12001) 2024-11-17T01:28:18,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-17T01:28:18,739 INFO [Thread-1553 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-17T01:28:18,740 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:18,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-17T01:28:18,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-17T01:28:18,742 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:18,742 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:18,742 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:18,765 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/ea979d4fec4140c597c9c418cc30fcd7 2024-11-17T01:28:18,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/742140faded541bebbcccb7091ce62ab as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/742140faded541bebbcccb7091ce62ab 2024-11-17T01:28:18,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/742140faded541bebbcccb7091ce62ab, entries=200, sequenceid=96, filesize=38.6 K 2024-11-17T01:28:18,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/7e4fe7b75e5b4fc3a98bf703ba4e5782 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/7e4fe7b75e5b4fc3a98bf703ba4e5782 2024-11-17T01:28:18,775 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/7e4fe7b75e5b4fc3a98bf703ba4e5782, entries=150, sequenceid=96, filesize=11.7 K 2024-11-17T01:28:18,776 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/ea979d4fec4140c597c9c418cc30fcd7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ea979d4fec4140c597c9c418cc30fcd7 2024-11-17T01:28:18,779 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ea979d4fec4140c597c9c418cc30fcd7, entries=150, sequenceid=96, filesize=11.7 K 2024-11-17T01:28:18,779 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 9218423c208ee1c122ec7fafbd087d74 in 1660ms, sequenceid=96, compaction requested=true 2024-11-17T01:28:18,779 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:18,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:18,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:18,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:18,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:18,780 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:18,780 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:18,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:18,780 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:18,780 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:18,780 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:18,780 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/A is initiating minor compaction (all files) 2024-11-17T01:28:18,780 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/B is initiating minor compaction (all files) 2024-11-17T01:28:18,781 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/A in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:18,781 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/B in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:18,781 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4ddb0269630e4c04806b99ba83354312, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c0097d2eca8c43a0b6247ff8f73b0b11, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/742140faded541bebbcccb7091ce62ab] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=99.2 K 2024-11-17T01:28:18,781 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d140cdb488e241b2b6d2a83b39368396, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/14f90a04c42e435b98bec5813e939dfe, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/7e4fe7b75e5b4fc3a98bf703ba4e5782] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=35.3 K 2024-11-17T01:28:18,781 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:18,781 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4ddb0269630e4c04806b99ba83354312, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c0097d2eca8c43a0b6247ff8f73b0b11, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/742140faded541bebbcccb7091ce62ab] 2024-11-17T01:28:18,781 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ddb0269630e4c04806b99ba83354312, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1731806891684 2024-11-17T01:28:18,781 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting d140cdb488e241b2b6d2a83b39368396, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1731806891684 2024-11-17T01:28:18,781 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0097d2eca8c43a0b6247ff8f73b0b11, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731806893845 2024-11-17T01:28:18,781 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 14f90a04c42e435b98bec5813e939dfe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731806893845 2024-11-17T01:28:18,781 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 742140faded541bebbcccb7091ce62ab, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731806895988 2024-11-17T01:28:18,781 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 7e4fe7b75e5b4fc3a98bf703ba4e5782, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731806895988 2024-11-17T01:28:18,786 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:18,787 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#B#compaction#309 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:18,788 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/68f1d52d11cd40e4aad08c690322c72a is 50, key is test_row_0/B:col10/1731806895988/Put/seqid=0 2024-11-17T01:28:18,789 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111732ce72b4802f4e38aa59d14d18a066cb_9218423c208ee1c122ec7fafbd087d74 store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:18,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742189_1365 (size=12207) 2024-11-17T01:28:18,791 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111732ce72b4802f4e38aa59d14d18a066cb_9218423c208ee1c122ec7fafbd087d74, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:18,791 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111732ce72b4802f4e38aa59d14d18a066cb_9218423c208ee1c122ec7fafbd087d74 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:18,795 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/68f1d52d11cd40e4aad08c690322c72a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/68f1d52d11cd40e4aad08c690322c72a 2024-11-17T01:28:18,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742190_1366 (size=4469) 2024-11-17T01:28:18,796 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#A#compaction#310 average throughput is 2.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:18,797 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/85321a334e344970bf7538cab9721a48 is 175, key is test_row_0/A:col10/1731806895988/Put/seqid=0 2024-11-17T01:28:18,801 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/B of 9218423c208ee1c122ec7fafbd087d74 into 68f1d52d11cd40e4aad08c690322c72a(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:18,801 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:18,801 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/B, priority=13, startTime=1731806898780; duration=0sec 2024-11-17T01:28:18,801 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:18,801 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:B 2024-11-17T01:28:18,801 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:18,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742191_1367 (size=31161) 2024-11-17T01:28:18,802 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:18,803 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/C is initiating minor compaction (all files) 2024-11-17T01:28:18,803 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/C in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:18,803 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f7bc8c49f67f44caa32e38ba31956fde, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/872fd558a9f641079d42a5c5b3503d2b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ea979d4fec4140c597c9c418cc30fcd7] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=35.3 K 2024-11-17T01:28:18,803 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f7bc8c49f67f44caa32e38ba31956fde, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=56, earliestPutTs=1731806891684 2024-11-17T01:28:18,803 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 872fd558a9f641079d42a5c5b3503d2b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731806893845 2024-11-17T01:28:18,803 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ea979d4fec4140c597c9c418cc30fcd7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731806895988 2024-11-17T01:28:18,810 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/85321a334e344970bf7538cab9721a48 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/85321a334e344970bf7538cab9721a48 2024-11-17T01:28:18,817 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/A of 9218423c208ee1c122ec7fafbd087d74 into 85321a334e344970bf7538cab9721a48(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:18,817 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:18,817 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/A, priority=13, startTime=1731806898780; duration=0sec 2024-11-17T01:28:18,817 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:18,817 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:A 2024-11-17T01:28:18,818 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#C#compaction#311 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:18,819 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/e447ff03b70244d1afd0bbdc0edf393e is 50, key is test_row_0/C:col10/1731806895988/Put/seqid=0 2024-11-17T01:28:18,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742192_1368 (size=12207) 2024-11-17T01:28:18,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-17T01:28:18,893 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:18,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-17T01:28:18,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:18,894 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-17T01:28:18,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:18,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:18,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:18,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:18,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:18,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:18,900 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111754005f0bd25b4b839a2ab23108a1c0ed_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806897154/Put/seqid=0 2024-11-17T01:28:18,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742193_1369 (size=12154) 2024-11-17T01:28:19,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-17T01:28:19,235 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/e447ff03b70244d1afd0bbdc0edf393e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/e447ff03b70244d1afd0bbdc0edf393e 2024-11-17T01:28:19,238 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/C of 9218423c208ee1c122ec7fafbd087d74 into e447ff03b70244d1afd0bbdc0edf393e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:19,238 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:19,238 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/C, priority=13, startTime=1731806898780; duration=0sec 2024-11-17T01:28:19,238 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:19,238 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:C 2024-11-17T01:28:19,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:19,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:19,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:19,306 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111754005f0bd25b4b839a2ab23108a1c0ed_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111754005f0bd25b4b839a2ab23108a1c0ed_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:19,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/4cf6e4c2860c4d7b9b5e8c25fedeae13, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:19,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/4cf6e4c2860c4d7b9b5e8c25fedeae13 is 175, key is test_row_0/A:col10/1731806897154/Put/seqid=0 2024-11-17T01:28:19,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806959303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806959304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742194_1370 (size=30955) 2024-11-17T01:28:19,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806959306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806959307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,313 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806959308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-17T01:28:19,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806959411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806959411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,416 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806959412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806959414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806959414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806959614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806959614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,619 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806959617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806959619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806959620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,711 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/4cf6e4c2860c4d7b9b5e8c25fedeae13 2024-11-17T01:28:19,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/1c1b0b1f41ed4eb2b8586dcebe12e23c is 50, key is test_row_0/B:col10/1731806897154/Put/seqid=0 2024-11-17T01:28:19,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742195_1371 (size=12001) 2024-11-17T01:28:19,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-17T01:28:19,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806959919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,922 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806959919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806959922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806959924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:19,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:19,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806959924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:20,120 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/1c1b0b1f41ed4eb2b8586dcebe12e23c 2024-11-17T01:28:20,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/77ea4d44c014466d97ee764205de5cf9 is 50, key is test_row_0/C:col10/1731806897154/Put/seqid=0 2024-11-17T01:28:20,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742196_1372 (size=12001) 2024-11-17T01:28:20,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:20,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806960425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:20,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:20,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806960427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:20,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:20,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806960429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:20,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:20,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806960429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:20,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:20,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806960431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:20,530 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/77ea4d44c014466d97ee764205de5cf9 2024-11-17T01:28:20,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/4cf6e4c2860c4d7b9b5e8c25fedeae13 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4cf6e4c2860c4d7b9b5e8c25fedeae13 2024-11-17T01:28:20,565 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4cf6e4c2860c4d7b9b5e8c25fedeae13, entries=150, sequenceid=117, filesize=30.2 K 2024-11-17T01:28:20,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/1c1b0b1f41ed4eb2b8586dcebe12e23c as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/1c1b0b1f41ed4eb2b8586dcebe12e23c 2024-11-17T01:28:20,568 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/1c1b0b1f41ed4eb2b8586dcebe12e23c, entries=150, sequenceid=117, filesize=11.7 K 2024-11-17T01:28:20,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/77ea4d44c014466d97ee764205de5cf9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/77ea4d44c014466d97ee764205de5cf9 2024-11-17T01:28:20,572 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/77ea4d44c014466d97ee764205de5cf9, entries=150, sequenceid=117, filesize=11.7 K 2024-11-17T01:28:20,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-17T01:28:20,573 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 9218423c208ee1c122ec7fafbd087d74 in 1679ms, sequenceid=117, compaction requested=false 2024-11-17T01:28:20,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:20,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:20,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-17T01:28:20,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-17T01:28:20,575 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-17T01:28:20,575 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8320 sec 2024-11-17T01:28:20,576 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.8340 sec 2024-11-17T01:28:20,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-17T01:28:20,845 INFO [Thread-1553 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-17T01:28:20,846 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:20,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-17T01:28:20,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-17T01:28:20,847 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:20,848 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:20,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:20,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-17T01:28:20,998 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:20,998 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-17T01:28:20,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:20,999 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-17T01:28:20,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:20,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:20,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:20,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:20,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:20,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:21,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111706fe62a1643c4dcb8731f7955fd04b68_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806899292/Put/seqid=0 2024-11-17T01:28:21,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742197_1373 (size=12254) 2024-11-17T01:28:21,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-17T01:28:21,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:21,412 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111706fe62a1643c4dcb8731f7955fd04b68_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111706fe62a1643c4dcb8731f7955fd04b68_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:21,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/a1b266ece137426ea5cb4a269a956ef5, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:21,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/a1b266ece137426ea5cb4a269a956ef5 is 175, key is test_row_0/A:col10/1731806899292/Put/seqid=0 2024-11-17T01:28:21,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742198_1374 (size=31055) 2024-11-17T01:28:21,416 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=135, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/a1b266ece137426ea5cb4a269a956ef5 2024-11-17T01:28:21,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/415ff4589c3443ffadd58056e30ce52c is 50, key is test_row_0/B:col10/1731806899292/Put/seqid=0 2024-11-17T01:28:21,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742199_1375 (size=12101) 2024-11-17T01:28:21,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:21,433 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:21,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-17T01:28:21,458 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806961452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806961452, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806961453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,459 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806961453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806961454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806961559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806961559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806961560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806961560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806961561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806961763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806961763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806961763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806961764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:21,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806961764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:21,825 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/415ff4589c3443ffadd58056e30ce52c 2024-11-17T01:28:21,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/6ab4a1c5338541aca685f3aec0bac3ad is 50, key is test_row_0/C:col10/1731806899292/Put/seqid=0 2024-11-17T01:28:21,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742200_1376 (size=12101) 2024-11-17T01:28:21,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-17T01:28:22,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806962065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806962067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806962069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,074 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806962070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,076 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806962073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,238 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=135 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/6ab4a1c5338541aca685f3aec0bac3ad 2024-11-17T01:28:22,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/a1b266ece137426ea5cb4a269a956ef5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/a1b266ece137426ea5cb4a269a956ef5 2024-11-17T01:28:22,245 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/a1b266ece137426ea5cb4a269a956ef5, entries=150, sequenceid=135, filesize=30.3 K 2024-11-17T01:28:22,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/415ff4589c3443ffadd58056e30ce52c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/415ff4589c3443ffadd58056e30ce52c 2024-11-17T01:28:22,248 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/415ff4589c3443ffadd58056e30ce52c, entries=150, sequenceid=135, filesize=11.8 K 2024-11-17T01:28:22,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/6ab4a1c5338541aca685f3aec0bac3ad as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/6ab4a1c5338541aca685f3aec0bac3ad 2024-11-17T01:28:22,252 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/6ab4a1c5338541aca685f3aec0bac3ad, entries=150, sequenceid=135, filesize=11.8 K 2024-11-17T01:28:22,252 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 9218423c208ee1c122ec7fafbd087d74 in 1253ms, sequenceid=135, compaction requested=true 2024-11-17T01:28:22,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:22,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:22,252 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-17T01:28:22,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-17T01:28:22,254 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-17T01:28:22,254 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4050 sec 2024-11-17T01:28:22,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.4090 sec 2024-11-17T01:28:22,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:22,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-17T01:28:22,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:22,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:22,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:22,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:22,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:22,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:22,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411176cd1e82904444b4d9f873ce887116fa8_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806901451/Put/seqid=0 2024-11-17T01:28:22,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742201_1377 (size=17284) 2024-11-17T01:28:22,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806962591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806962591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806962592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806962596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806962598, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806962699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806962699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806962699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,707 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806962703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806962705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806962906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,910 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806962906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806962907, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,912 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806962908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:22,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806962911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:22,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-17T01:28:22,950 INFO [Thread-1553 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-17T01:28:22,951 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:22,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-17T01:28:22,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-17T01:28:22,952 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:22,953 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:22,953 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:22,986 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:22,988 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411176cd1e82904444b4d9f873ce887116fa8_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411176cd1e82904444b4d9f873ce887116fa8_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:22,989 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store 
file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/c3ea530a7d5a4281991d8470110d1c11, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:22,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/c3ea530a7d5a4281991d8470110d1c11 is 175, key is test_row_0/A:col10/1731806901451/Put/seqid=0 2024-11-17T01:28:22,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742202_1378 (size=48389) 2024-11-17T01:28:23,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-17T01:28:23,104 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-17T01:28:23,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:23,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,105 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:23,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806963211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,216 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806963212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806963213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806963214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806963218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-17T01:28:23,256 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,257 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-17T01:28:23,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:23,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,257 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:23,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,393 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=156, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/c3ea530a7d5a4281991d8470110d1c11 2024-11-17T01:28:23,398 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/09a362418d174bc4a651eb3f3b1bda32 is 50, key is test_row_0/B:col10/1731806901451/Put/seqid=0 2024-11-17T01:28:23,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742203_1379 (size=12151) 2024-11-17T01:28:23,409 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-17T01:28:23,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:23,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,409 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:23,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-17T01:28:23,561 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,561 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-17T01:28:23,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:23,561 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,562 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:23,713 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,713 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-17T01:28:23,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:23,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,714 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,714 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806963718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806963720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806963721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806963723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806963724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,801 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/09a362418d174bc4a651eb3f3b1bda32 2024-11-17T01:28:23,807 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/a175207fafcc4fe495ff703679af6c49 is 50, key is test_row_0/C:col10/1731806901451/Put/seqid=0 2024-11-17T01:28:23,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742204_1380 (size=12151) 2024-11-17T01:28:23,865 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:23,866 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-17T01:28:23,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
as already flushing 2024-11-17T01:28:23,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:23,866 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:23,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:24,018 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,018 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-17T01:28:24,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:24,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:24,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:24,019 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:24,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:24,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:24,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-17T01:28:24,170 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,171 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-17T01:28:24,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:24,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:24,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:24,171 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:24,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:24,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:24,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/a175207fafcc4fe495ff703679af6c49 2024-11-17T01:28:24,216 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/c3ea530a7d5a4281991d8470110d1c11 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c3ea530a7d5a4281991d8470110d1c11 2024-11-17T01:28:24,219 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c3ea530a7d5a4281991d8470110d1c11, entries=250, sequenceid=156, filesize=47.3 K 2024-11-17T01:28:24,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/09a362418d174bc4a651eb3f3b1bda32 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/09a362418d174bc4a651eb3f3b1bda32 2024-11-17T01:28:24,223 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/09a362418d174bc4a651eb3f3b1bda32, entries=150, 
sequenceid=156, filesize=11.9 K 2024-11-17T01:28:24,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/a175207fafcc4fe495ff703679af6c49 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/a175207fafcc4fe495ff703679af6c49 2024-11-17T01:28:24,226 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/a175207fafcc4fe495ff703679af6c49, entries=150, sequenceid=156, filesize=11.9 K 2024-11-17T01:28:24,227 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 9218423c208ee1c122ec7fafbd087d74 in 1652ms, sequenceid=156, compaction requested=true 2024-11-17T01:28:24,227 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:24,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:24,227 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:28:24,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:24,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:24,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:24,227 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:28:24,227 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:24,228 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:24,229 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:28:24,229 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141560 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:28:24,229 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] 
regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/A is initiating minor compaction (all files) 2024-11-17T01:28:24,229 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/B is initiating minor compaction (all files) 2024-11-17T01:28:24,230 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/B in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:24,230 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/A in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:24,230 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/68f1d52d11cd40e4aad08c690322c72a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/1c1b0b1f41ed4eb2b8586dcebe12e23c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/415ff4589c3443ffadd58056e30ce52c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/09a362418d174bc4a651eb3f3b1bda32] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=47.3 K 2024-11-17T01:28:24,230 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/85321a334e344970bf7538cab9721a48, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4cf6e4c2860c4d7b9b5e8c25fedeae13, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/a1b266ece137426ea5cb4a269a956ef5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c3ea530a7d5a4281991d8470110d1c11] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=138.2 K 2024-11-17T01:28:24,230 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:24,230 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/85321a334e344970bf7538cab9721a48, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4cf6e4c2860c4d7b9b5e8c25fedeae13, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/a1b266ece137426ea5cb4a269a956ef5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c3ea530a7d5a4281991d8470110d1c11] 2024-11-17T01:28:24,230 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 68f1d52d11cd40e4aad08c690322c72a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731806895988 2024-11-17T01:28:24,230 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85321a334e344970bf7538cab9721a48, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731806895988 2024-11-17T01:28:24,230 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c1b0b1f41ed4eb2b8586dcebe12e23c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731806897153 2024-11-17T01:28:24,230 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4cf6e4c2860c4d7b9b5e8c25fedeae13, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731806897153 2024-11-17T01:28:24,231 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 415ff4589c3443ffadd58056e30ce52c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1731806899292 2024-11-17T01:28:24,231 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting a1b266ece137426ea5cb4a269a956ef5, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1731806899292 2024-11-17T01:28:24,231 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 09a362418d174bc4a651eb3f3b1bda32, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731806901451 2024-11-17T01:28:24,231 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3ea530a7d5a4281991d8470110d1c11, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731806901451 2024-11-17T01:28:24,236 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:24,237 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411179c1010612cad4ce385adf45f5556e6b0_9218423c208ee1c122ec7fafbd087d74 store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:24,238 INFO 
[RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#B#compaction#322 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:24,238 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/26d9ceaa89c1417f93b9d37d4e167374 is 50, key is test_row_0/B:col10/1731806901451/Put/seqid=0 2024-11-17T01:28:24,239 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411179c1010612cad4ce385adf45f5556e6b0_9218423c208ee1c122ec7fafbd087d74, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:24,239 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411179c1010612cad4ce385adf45f5556e6b0_9218423c208ee1c122ec7fafbd087d74 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:24,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742206_1382 (size=4469) 2024-11-17T01:28:24,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742205_1381 (size=12493) 2024-11-17T01:28:24,323 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,323 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-17T01:28:24,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:24,323 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-17T01:28:24,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:24,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:24,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:24,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:24,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:24,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:24,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411177353fc1f303c4763bd2516f8fa21283d_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806902597/Put/seqid=0 2024-11-17T01:28:24,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742207_1383 (size=12304) 2024-11-17T01:28:24,647 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#A#compaction#321 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:24,648 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/3e7b46c3ffee4f19ae3d085e3f4fc1e0 is 175, key is test_row_0/A:col10/1731806901451/Put/seqid=0 2024-11-17T01:28:24,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742208_1384 (size=31447) 2024-11-17T01:28:24,659 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/26d9ceaa89c1417f93b9d37d4e167374 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/26d9ceaa89c1417f93b9d37d4e167374 2024-11-17T01:28:24,663 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/B of 9218423c208ee1c122ec7fafbd087d74 into 26d9ceaa89c1417f93b9d37d4e167374(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:24,663 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:24,663 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/B, priority=12, startTime=1731806904227; duration=0sec 2024-11-17T01:28:24,663 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:24,663 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:B 2024-11-17T01:28:24,663 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-17T01:28:24,664 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48460 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-17T01:28:24,664 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/C is initiating minor compaction (all files) 2024-11-17T01:28:24,664 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/C in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:24,665 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/e447ff03b70244d1afd0bbdc0edf393e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/77ea4d44c014466d97ee764205de5cf9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/6ab4a1c5338541aca685f3aec0bac3ad, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/a175207fafcc4fe495ff703679af6c49] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=47.3 K 2024-11-17T01:28:24,665 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e447ff03b70244d1afd0bbdc0edf393e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731806895988 2024-11-17T01:28:24,665 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 77ea4d44c014466d97ee764205de5cf9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1731806897153 2024-11-17T01:28:24,665 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ab4a1c5338541aca685f3aec0bac3ad, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=135, earliestPutTs=1731806899292 2024-11-17T01:28:24,665 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a175207fafcc4fe495ff703679af6c49, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731806901451 2024-11-17T01:28:24,672 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#C#compaction#324 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:24,673 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/387c8411407f4dcdb7a9a40533a04ee8 is 50, key is test_row_0/C:col10/1731806901451/Put/seqid=0 2024-11-17T01:28:24,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742209_1385 (size=12493) 2024-11-17T01:28:24,730 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
as already flushing 2024-11-17T01:28:24,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:24,733 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:24,736 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411177353fc1f303c4763bd2516f8fa21283d_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411177353fc1f303c4763bd2516f8fa21283d_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:24,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/6d8a4ff529284945afaed872b79c7958, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:24,738 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/6d8a4ff529284945afaed872b79c7958 is 175, key is test_row_0/A:col10/1731806902597/Put/seqid=0 2024-11-17T01:28:24,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742210_1386 (size=31105) 2024-11-17T01:28:24,742 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=171, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/6d8a4ff529284945afaed872b79c7958 2024-11-17T01:28:24,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/9e74eeb69ee748b7b451ee6221514efc is 50, key is test_row_0/B:col10/1731806902597/Put/seqid=0 2024-11-17T01:28:24,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742211_1387 (size=12151) 2024-11-17T01:28:24,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806964760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,769 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806964761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806964768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806964769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,778 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806964769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806964870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806964870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806964877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806964879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:24,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:24,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806964879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,055 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/3e7b46c3ffee4f19ae3d085e3f4fc1e0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3e7b46c3ffee4f19ae3d085e3f4fc1e0 2024-11-17T01:28:25,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-17T01:28:25,058 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/A of 9218423c208ee1c122ec7fafbd087d74 into 3e7b46c3ffee4f19ae3d085e3f4fc1e0(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:25,058 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:25,058 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/A, priority=12, startTime=1731806904227; duration=0sec 2024-11-17T01:28:25,058 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:25,058 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:A 2024-11-17T01:28:25,079 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/387c8411407f4dcdb7a9a40533a04ee8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/387c8411407f4dcdb7a9a40533a04ee8 2024-11-17T01:28:25,083 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/C of 9218423c208ee1c122ec7fafbd087d74 into 387c8411407f4dcdb7a9a40533a04ee8(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:25,083 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:25,083 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/C, priority=12, startTime=1731806904227; duration=0sec 2024-11-17T01:28:25,083 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:25,083 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:C 2024-11-17T01:28:25,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806965079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806965079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806965084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806965087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806965087, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,164 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/9e74eeb69ee748b7b451ee6221514efc 2024-11-17T01:28:25,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/ee944049c438463fb55d40d25176f528 is 50, key is test_row_0/C:col10/1731806902597/Put/seqid=0 2024-11-17T01:28:25,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742212_1388 (size=12151) 2024-11-17T01:28:25,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806965386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806965387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806965391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806965391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806965392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,574 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/ee944049c438463fb55d40d25176f528 2024-11-17T01:28:25,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/6d8a4ff529284945afaed872b79c7958 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/6d8a4ff529284945afaed872b79c7958 2024-11-17T01:28:25,614 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/6d8a4ff529284945afaed872b79c7958, entries=150, sequenceid=171, filesize=30.4 K 2024-11-17T01:28:25,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/9e74eeb69ee748b7b451ee6221514efc as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/9e74eeb69ee748b7b451ee6221514efc 2024-11-17T01:28:25,617 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/9e74eeb69ee748b7b451ee6221514efc, entries=150, sequenceid=171, filesize=11.9 K 2024-11-17T01:28:25,618 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/ee944049c438463fb55d40d25176f528 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ee944049c438463fb55d40d25176f528 2024-11-17T01:28:25,621 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ee944049c438463fb55d40d25176f528, entries=150, sequenceid=171, filesize=11.9 K 2024-11-17T01:28:25,622 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 9218423c208ee1c122ec7fafbd087d74 in 1298ms, sequenceid=171, compaction requested=false 2024-11-17T01:28:25,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:25,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:25,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-17T01:28:25,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-17T01:28:25,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-17T01:28:25,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6700 sec 2024-11-17T01:28:25,625 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 2.6730 sec 2024-11-17T01:28:25,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:25,896 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-17T01:28:25,896 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:25,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:25,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:25,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:25,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:25,897 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:25,903 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117de884f4cd04543e38d535aff2337161e_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806904767/Put/seqid=0 2024-11-17T01:28:25,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742213_1389 (size=14794) 2024-11-17T01:28:25,908 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:25,911 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117de884f4cd04543e38d535aff2337161e_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117de884f4cd04543e38d535aff2337161e_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:25,911 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/818905493189424bb365f418796806df, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:25,912 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/818905493189424bb365f418796806df is 175, key is test_row_0/A:col10/1731806904767/Put/seqid=0 2024-11-17T01:28:25,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742214_1390 (size=39749) 2024-11-17T01:28:25,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,918 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806965909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806965909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806965910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806965911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:25,919 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:25,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806965911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806966019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806966019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806966019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806966019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806966019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,229 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806966225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806966225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806966226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806966226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806966226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,315 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/818905493189424bb365f418796806df 2024-11-17T01:28:26,320 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/e858e83ce5aa4a0b8f9a1a77bc13a60d is 50, key is test_row_0/B:col10/1731806904767/Put/seqid=0 2024-11-17T01:28:26,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742215_1391 (size=12151) 2024-11-17T01:28:26,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806966530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806966531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806966531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806966531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,536 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:26,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806966532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:26,724 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/e858e83ce5aa4a0b8f9a1a77bc13a60d 2024-11-17T01:28:26,730 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/cb1eceb810b9491b9d5054442eca5b18 is 50, key is test_row_0/C:col10/1731806904767/Put/seqid=0 2024-11-17T01:28:26,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742216_1392 (size=12151) 2024-11-17T01:28:27,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:27,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806967036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:27,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:27,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806967037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:27,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:27,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806967038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:27,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:27,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806967038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:27,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:27,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806967039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:27,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-17T01:28:27,056 INFO [Thread-1553 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-17T01:28:27,057 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:27,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-17T01:28:27,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-17T01:28:27,059 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:27,059 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:27,059 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:27,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at 
sequenceid=197 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/cb1eceb810b9491b9d5054442eca5b18 2024-11-17T01:28:27,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/818905493189424bb365f418796806df as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/818905493189424bb365f418796806df 2024-11-17T01:28:27,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/818905493189424bb365f418796806df, entries=200, sequenceid=197, filesize=38.8 K 2024-11-17T01:28:27,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/e858e83ce5aa4a0b8f9a1a77bc13a60d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/e858e83ce5aa4a0b8f9a1a77bc13a60d 2024-11-17T01:28:27,143 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/e858e83ce5aa4a0b8f9a1a77bc13a60d, entries=150, sequenceid=197, filesize=11.9 K 2024-11-17T01:28:27,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/cb1eceb810b9491b9d5054442eca5b18 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cb1eceb810b9491b9d5054442eca5b18 2024-11-17T01:28:27,146 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cb1eceb810b9491b9d5054442eca5b18, entries=150, sequenceid=197, filesize=11.9 K 2024-11-17T01:28:27,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 9218423c208ee1c122ec7fafbd087d74 in 1251ms, sequenceid=197, compaction requested=true 2024-11-17T01:28:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:27,147 DEBUG 
[RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:27,147 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:27,147 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:27,148 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102301 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:27,148 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/A is initiating minor compaction (all files) 2024-11-17T01:28:27,148 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:27,148 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/B is initiating minor compaction (all files) 2024-11-17T01:28:27,148 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/A in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:27,148 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/B in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:27,148 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3e7b46c3ffee4f19ae3d085e3f4fc1e0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/6d8a4ff529284945afaed872b79c7958, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/818905493189424bb365f418796806df] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=99.9 K 2024-11-17T01:28:27,148 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/26d9ceaa89c1417f93b9d37d4e167374, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/9e74eeb69ee748b7b451ee6221514efc, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/e858e83ce5aa4a0b8f9a1a77bc13a60d] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=35.9 K 2024-11-17T01:28:27,148 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:27,148 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3e7b46c3ffee4f19ae3d085e3f4fc1e0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/6d8a4ff529284945afaed872b79c7958, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/818905493189424bb365f418796806df] 2024-11-17T01:28:27,148 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 26d9ceaa89c1417f93b9d37d4e167374, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731806901451 2024-11-17T01:28:27,148 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e7b46c3ffee4f19ae3d085e3f4fc1e0, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731806901451 2024-11-17T01:28:27,148 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e74eeb69ee748b7b451ee6221514efc, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731806902590 2024-11-17T01:28:27,149 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d8a4ff529284945afaed872b79c7958, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731806902590 2024-11-17T01:28:27,149 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e858e83ce5aa4a0b8f9a1a77bc13a60d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731806904759 2024-11-17T01:28:27,149 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 818905493189424bb365f418796806df, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731806904759 2024-11-17T01:28:27,152 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:27,153 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#B#compaction#330 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:27,154 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/074f1a89ced54d35a1d134b6fbdcd186 is 50, key is test_row_0/B:col10/1731806904767/Put/seqid=0 2024-11-17T01:28:27,156 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411178503c74c5ae844cf8eba09a737cfa270_9218423c208ee1c122ec7fafbd087d74 store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:27,157 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411178503c74c5ae844cf8eba09a737cfa270_9218423c208ee1c122ec7fafbd087d74, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:27,157 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411178503c74c5ae844cf8eba09a737cfa270_9218423c208ee1c122ec7fafbd087d74 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:27,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742217_1393 (size=12595) 2024-11-17T01:28:27,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-17T01:28:27,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742218_1394 (size=4469) 2024-11-17T01:28:27,210 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:27,210 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-17T01:28:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:27,211 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-17T01:28:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:27,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:27,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411172f97b94d9b5749e19f82b81c5e8c0a13_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806905910/Put/seqid=0 2024-11-17T01:28:27,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742219_1395 (size=12304) 2024-11-17T01:28:27,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-17T01:28:27,561 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#A#compaction#331 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:27,561 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/ff1c176a3e2244a89cc4fca4e9ccf290 is 175, key is test_row_0/A:col10/1731806904767/Put/seqid=0 2024-11-17T01:28:27,562 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/074f1a89ced54d35a1d134b6fbdcd186 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/074f1a89ced54d35a1d134b6fbdcd186 2024-11-17T01:28:27,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742220_1396 (size=31549) 2024-11-17T01:28:27,567 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/B of 9218423c208ee1c122ec7fafbd087d74 into 074f1a89ced54d35a1d134b6fbdcd186(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:27,567 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:27,567 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/B, priority=13, startTime=1731806907147; duration=0sec 2024-11-17T01:28:27,567 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:27,567 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:B 2024-11-17T01:28:27,567 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:27,568 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/ff1c176a3e2244a89cc4fca4e9ccf290 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/ff1c176a3e2244a89cc4fca4e9ccf290 2024-11-17T01:28:27,568 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:27,568 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/C is initiating minor compaction (all files) 2024-11-17T01:28:27,568 INFO 
[RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/C in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:27,568 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/387c8411407f4dcdb7a9a40533a04ee8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ee944049c438463fb55d40d25176f528, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cb1eceb810b9491b9d5054442eca5b18] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=35.9 K 2024-11-17T01:28:27,568 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 387c8411407f4dcdb7a9a40533a04ee8, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731806901451 2024-11-17T01:28:27,569 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ee944049c438463fb55d40d25176f528, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731806902590 2024-11-17T01:28:27,569 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting cb1eceb810b9491b9d5054442eca5b18, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731806904759 2024-11-17T01:28:27,571 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/A of 9218423c208ee1c122ec7fafbd087d74 into ff1c176a3e2244a89cc4fca4e9ccf290(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:27,571 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:27,571 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/A, priority=13, startTime=1731806907147; duration=0sec 2024-11-17T01:28:27,571 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:27,571 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:A 2024-11-17T01:28:27,574 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#C#compaction#333 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:27,574 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/f1e73c2b437941898c3483251c8b273c is 50, key is test_row_0/C:col10/1731806904767/Put/seqid=0 2024-11-17T01:28:27,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742221_1397 (size=12595) 2024-11-17T01:28:27,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:27,626 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411172f97b94d9b5749e19f82b81c5e8c0a13_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172f97b94d9b5749e19f82b81c5e8c0a13_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:27,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/dd6e8c43cd994e60b5a473d44cda0f9c, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:27,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/dd6e8c43cd994e60b5a473d44cda0f9c is 175, key is test_row_0/A:col10/1731806905910/Put/seqid=0 2024-11-17T01:28:27,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742222_1398 (size=31105) 2024-11-17T01:28:27,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-17T01:28:27,981 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/f1e73c2b437941898c3483251c8b273c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f1e73c2b437941898c3483251c8b273c 2024-11-17T01:28:27,984 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/C of 9218423c208ee1c122ec7fafbd087d74 into f1e73c2b437941898c3483251c8b273c(size=12.3 K), total size for store is 
12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:27,984 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:27,984 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/C, priority=13, startTime=1731806907147; duration=0sec 2024-11-17T01:28:27,984 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:27,984 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:C 2024-11-17T01:28:28,031 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/dd6e8c43cd994e60b5a473d44cda0f9c 2024-11-17T01:28:28,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/d74004eb112f415d9701a214cfb83555 is 50, key is test_row_0/B:col10/1731806905910/Put/seqid=0 2024-11-17T01:28:28,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742223_1399 (size=12151) 2024-11-17T01:28:28,043 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:28,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:28,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806968064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806968065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806968065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806968067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806968071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-17T01:28:28,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806968172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806968173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806968173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,180 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806968176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806968177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806968378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806968378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806968379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,384 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806968380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,385 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806968382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,439 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/d74004eb112f415d9701a214cfb83555 2024-11-17T01:28:28,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/1e7d9eed29d04df48e233b67602f8eb5 is 50, key is test_row_0/C:col10/1731806905910/Put/seqid=0 2024-11-17T01:28:28,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742224_1400 (size=12151) 2024-11-17T01:28:28,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806968685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806968685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806968685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806968686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:28,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806968687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:28,849 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/1e7d9eed29d04df48e233b67602f8eb5 2024-11-17T01:28:28,852 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/dd6e8c43cd994e60b5a473d44cda0f9c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd6e8c43cd994e60b5a473d44cda0f9c 2024-11-17T01:28:28,855 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd6e8c43cd994e60b5a473d44cda0f9c, entries=150, sequenceid=210, filesize=30.4 K 2024-11-17T01:28:28,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/d74004eb112f415d9701a214cfb83555 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d74004eb112f415d9701a214cfb83555 2024-11-17T01:28:28,858 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d74004eb112f415d9701a214cfb83555, entries=150, sequenceid=210, filesize=11.9 K 2024-11-17T01:28:28,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/1e7d9eed29d04df48e233b67602f8eb5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e7d9eed29d04df48e233b67602f8eb5 2024-11-17T01:28:28,862 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e7d9eed29d04df48e233b67602f8eb5, entries=150, sequenceid=210, filesize=11.9 K 2024-11-17T01:28:28,862 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9218423c208ee1c122ec7fafbd087d74 in 1651ms, sequenceid=210, compaction requested=false 2024-11-17T01:28:28,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:28,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:28,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-17T01:28:28,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-17T01:28:28,864 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-17T01:28:28,865 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8040 sec 2024-11-17T01:28:28,865 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.8080 sec 2024-11-17T01:28:29,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-17T01:28:29,161 INFO [Thread-1553 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-17T01:28:29,162 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:29,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-17T01:28:29,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-17T01:28:29,163 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:29,164 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:29,164 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:29,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:29,193 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-17T01:28:29,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:29,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:29,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:29,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new 
segment=null 2024-11-17T01:28:29,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:29,194 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:29,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411174e722df384ba45a680b2f8212658f736_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806908064/Put/seqid=0 2024-11-17T01:28:29,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742225_1401 (size=17284) 2024-11-17T01:28:29,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806969231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806969231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806969231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806969232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806969232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-17T01:28:29,315 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,315 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:29,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:29,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,316 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
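Editor's note: the repeated Mutate rejections above are the region's memstore back-pressure — HRegion.checkResources throws RegionTooBusyException once the memstore passes its blocking limit (512 K in this run), and callers are expected to back off and retry while the flush drains. Below is a minimal, purely illustrative client-side sketch of such a retry, assuming the TestAcidGuarantees table and column family A seen in this log; note the stock HBase client already retries this exception internally, so an explicit loop is only for illustration, and the row/value names are hypothetical.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempt = 0;
      while (true) {
        try {
          table.put(put);        // succeeds once the flush has drained the memstore
          break;
        } catch (RegionTooBusyException e) {
          if (++attempt > 5) {
            throw e;             // give up after a few attempts
          }
          Thread.sleep(100L * attempt); // back off while the region flushes
        }
      }
    }
  }
}
```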
2024-11-17T01:28:29,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806969339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806969339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806969339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,343 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806969339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806969339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-17T01:28:29,467 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:29,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:29,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,468 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
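Editor's note: for context on the 512 K figure, the blocking limit checked in HRegion.checkResources is the per-region flush size multiplied by a block multiplier; this test run presumably shrinks the flush size to force the condition. A hedged sketch of the two standard knobs follows — the property names are the stock HBase ones, and the values shown are the 2.x defaults (128 MB and 4), not what this test uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitConfig {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Memstore size at which a region flush is triggered (default 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore
    // reaches flush.size * block.multiplier (default multiplier is 4).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}
```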
2024-11-17T01:28:29,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806969544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,545 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806969544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,546 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806969544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,550 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806969545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,551 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806969545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,608 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:29,611 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411174e722df384ba45a680b2f8212658f736_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411174e722df384ba45a680b2f8212658f736_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:29,611 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/3379d67cdc7443cfb319f64fe6d51893, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:29,612 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/3379d67cdc7443cfb319f64fe6d51893 is 175, key is test_row_0/A:col10/1731806908064/Put/seqid=0 2024-11-17T01:28:29,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742226_1402 (size=48389) 2024-11-17T01:28:29,620 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,620 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:29,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
as already flushing 2024-11-17T01:28:29,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-17T01:28:29,777 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:29,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:29,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806969847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,850 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806969847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806969848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,856 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806969853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:29,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806969853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,929 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:29,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:29,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:29,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:29,930 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:29,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T01:28:30,016 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/3379d67cdc7443cfb319f64fe6d51893 2024-11-17T01:28:30,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/3077980b612745d39b815ebf7e8a9635 is 50, key is test_row_0/B:col10/1731806908064/Put/seqid=0 2024-11-17T01:28:30,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742227_1403 (size=12151) 2024-11-17T01:28:30,081 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:30,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:30,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:30,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,082 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:30,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,234 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:30,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
as already flushing 2024-11-17T01:28:30,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,234 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-17T01:28:30,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43068 deadline: 1731806970351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:30,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43050 deadline: 1731806970351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,359 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:30,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43056 deadline: 1731806970355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,362 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:30,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43016 deadline: 1731806970358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:30,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:43034 deadline: 1731806970358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,386 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:30,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:30,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/3077980b612745d39b815ebf7e8a9635 2024-11-17T01:28:30,437 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/2acbf266096046b3ab713c900eea5c2a is 50, key is test_row_0/C:col10/1731806908064/Put/seqid=0 2024-11-17T01:28:30,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742228_1404 (size=12151) 2024-11-17T01:28:30,527 DEBUG [Thread-1554 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3875c8c5 to 127.0.0.1:63898 2024-11-17T01:28:30,527 DEBUG [Thread-1554 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:30,528 DEBUG [Thread-1556 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0801ba40 to 127.0.0.1:63898 2024-11-17T01:28:30,528 DEBUG [Thread-1556 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:30,532 DEBUG [Thread-1562 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x798e7fd4 to 127.0.0.1:63898 2024-11-17T01:28:30,532 DEBUG [Thread-1562 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:30,533 DEBUG [Thread-1560 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e3203d9 to 127.0.0.1:63898 2024-11-17T01:28:30,533 DEBUG [Thread-1560 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:30,533 DEBUG [Thread-1558 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x27539bdc to 
127.0.0.1:63898 2024-11-17T01:28:30,533 DEBUG [Thread-1558 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:30,538 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:30,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:30,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,691 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:30,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:30,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:30,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:30,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,841 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/2acbf266096046b3ab713c900eea5c2a 2024-11-17T01:28:30,847 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:30,847 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:30,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:30,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. as already flushing 2024-11-17T01:28:30,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:30,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:30,849 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/3379d67cdc7443cfb319f64fe6d51893 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3379d67cdc7443cfb319f64fe6d51893 2024-11-17T01:28:30,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:30,854 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3379d67cdc7443cfb319f64fe6d51893, entries=250, sequenceid=238, filesize=47.3 K 2024-11-17T01:28:30,856 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/3077980b612745d39b815ebf7e8a9635 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/3077980b612745d39b815ebf7e8a9635 2024-11-17T01:28:30,860 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/3077980b612745d39b815ebf7e8a9635, entries=150, sequenceid=238, filesize=11.9 K 2024-11-17T01:28:30,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/2acbf266096046b3ab713c900eea5c2a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/2acbf266096046b3ab713c900eea5c2a 2024-11-17T01:28:30,864 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/2acbf266096046b3ab713c900eea5c2a, entries=150, sequenceid=238, filesize=11.9 K 2024-11-17T01:28:30,865 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 9218423c208ee1c122ec7fafbd087d74 in 1672ms, sequenceid=238, compaction requested=true 2024-11-17T01:28:30,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:30,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:30,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:30,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9218423c208ee1c122ec7fafbd087d74:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:30,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:30,865 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:30,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
9218423c208ee1c122ec7fafbd087d74:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:30,865 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:30,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:30,866 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:30,866 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111043 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:30,866 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/B is initiating minor compaction (all files) 2024-11-17T01:28:30,866 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/A is initiating minor compaction (all files) 2024-11-17T01:28:30,866 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/B in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,866 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/A in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:30,866 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/074f1a89ced54d35a1d134b6fbdcd186, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d74004eb112f415d9701a214cfb83555, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/3077980b612745d39b815ebf7e8a9635] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=36.0 K 2024-11-17T01:28:30,866 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/ff1c176a3e2244a89cc4fca4e9ccf290, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd6e8c43cd994e60b5a473d44cda0f9c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3379d67cdc7443cfb319f64fe6d51893] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=108.4 K 2024-11-17T01:28:30,866 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:30,867 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/ff1c176a3e2244a89cc4fca4e9ccf290, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd6e8c43cd994e60b5a473d44cda0f9c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3379d67cdc7443cfb319f64fe6d51893] 2024-11-17T01:28:30,867 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 074f1a89ced54d35a1d134b6fbdcd186, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731806904759 2024-11-17T01:28:30,867 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff1c176a3e2244a89cc4fca4e9ccf290, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731806904759 2024-11-17T01:28:30,867 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting d74004eb112f415d9701a214cfb83555, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731806905905 2024-11-17T01:28:30,867 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting dd6e8c43cd994e60b5a473d44cda0f9c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731806905905 2024-11-17T01:28:30,867 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 3077980b612745d39b815ebf7e8a9635, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1731806908064 2024-11-17T01:28:30,868 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3379d67cdc7443cfb319f64fe6d51893, keycount=250, bloomtype=ROW, size=47.3 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1731806908064 2024-11-17T01:28:30,874 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:30,874 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#B#compaction#339 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:30,874 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/c9dc394d32cf4b61b44105c42199d9b8 is 50, key is test_row_0/B:col10/1731806908064/Put/seqid=0 2024-11-17T01:28:30,876 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411177c465bda936b4416854ea50d2b4158bf_9218423c208ee1c122ec7fafbd087d74 store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:30,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742229_1405 (size=12697) 2024-11-17T01:28:30,880 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411177c465bda936b4416854ea50d2b4158bf_9218423c208ee1c122ec7fafbd087d74, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:30,880 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411177c465bda936b4416854ea50d2b4158bf_9218423c208ee1c122ec7fafbd087d74 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:30,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742230_1406 (size=4469) 2024-11-17T01:28:31,001 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:31,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-17T01:28:31,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:31,002 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-17T01:28:31,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:31,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:31,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:31,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:31,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:31,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:31,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117a7e65c4e31874949982e586cee3288c9_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_0/A:col10/1731806909231/Put/seqid=0 2024-11-17T01:28:31,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742231_1407 (size=12304) 2024-11-17T01:28:31,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-17T01:28:31,286 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#A#compaction#340 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:31,287 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/bdcfe299740d4dafa3981a374fc913d1 is 175, key is test_row_0/A:col10/1731806908064/Put/seqid=0 2024-11-17T01:28:31,289 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/c9dc394d32cf4b61b44105c42199d9b8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/c9dc394d32cf4b61b44105c42199d9b8 2024-11-17T01:28:31,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742232_1408 (size=31651) 2024-11-17T01:28:31,294 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/B of 9218423c208ee1c122ec7fafbd087d74 into c9dc394d32cf4b61b44105c42199d9b8(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:31,294 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:31,294 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/B, priority=13, startTime=1731806910865; duration=0sec 2024-11-17T01:28:31,294 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:31,294 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:B 2024-11-17T01:28:31,294 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:31,295 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:31,295 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 9218423c208ee1c122ec7fafbd087d74/C is initiating minor compaction (all files) 2024-11-17T01:28:31,295 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9218423c208ee1c122ec7fafbd087d74/C in TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:31,295 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f1e73c2b437941898c3483251c8b273c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e7d9eed29d04df48e233b67602f8eb5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/2acbf266096046b3ab713c900eea5c2a] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp, totalSize=36.0 K 2024-11-17T01:28:31,296 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f1e73c2b437941898c3483251c8b273c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731806904759 2024-11-17T01:28:31,296 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e7d9eed29d04df48e233b67602f8eb5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1731806905905 2024-11-17T01:28:31,296 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 2acbf266096046b3ab713c900eea5c2a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1731806908064 2024-11-17T01:28:31,302 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9218423c208ee1c122ec7fafbd087d74#C#compaction#342 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:31,302 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/10d7d9d2f9d44880829f506d134c8c68 is 50, key is test_row_0/C:col10/1731806908064/Put/seqid=0 2024-11-17T01:28:31,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742233_1409 (size=12697) 2024-11-17T01:28:31,359 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
as already flushing 2024-11-17T01:28:31,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:31,359 DEBUG [Thread-1545 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x51f7d511 to 127.0.0.1:63898 2024-11-17T01:28:31,359 DEBUG [Thread-1545 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:31,363 DEBUG [Thread-1547 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1dc42ea6 to 127.0.0.1:63898 2024-11-17T01:28:31,363 DEBUG [Thread-1547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:31,363 DEBUG [Thread-1551 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6cd96549 to 127.0.0.1:63898 2024-11-17T01:28:31,363 DEBUG [Thread-1551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:31,364 DEBUG [Thread-1543 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5fe71801 to 127.0.0.1:63898 2024-11-17T01:28:31,364 DEBUG [Thread-1543 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:31,370 DEBUG [Thread-1549 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x117e86d9 to 127.0.0.1:63898 2024-11-17T01:28:31,370 DEBUG [Thread-1549 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:31,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:31,421 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117a7e65c4e31874949982e586cee3288c9_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117a7e65c4e31874949982e586cee3288c9_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:31,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/23a25c7b60af4e72bbd6ff6baeb5355b, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:31,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/23a25c7b60af4e72bbd6ff6baeb5355b is 175, key is test_row_0/A:col10/1731806909231/Put/seqid=0 2024-11-17T01:28:31,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742234_1410 (size=31105) 2024-11-17T01:28:31,697 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/bdcfe299740d4dafa3981a374fc913d1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/bdcfe299740d4dafa3981a374fc913d1 2024-11-17T01:28:31,701 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/A of 9218423c208ee1c122ec7fafbd087d74 into bdcfe299740d4dafa3981a374fc913d1(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:31,701 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:31,701 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/A, priority=13, startTime=1731806910865; duration=0sec 2024-11-17T01:28:31,701 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:31,701 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:A 2024-11-17T01:28:31,709 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/10d7d9d2f9d44880829f506d134c8c68 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/10d7d9d2f9d44880829f506d134c8c68 2024-11-17T01:28:31,712 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9218423c208ee1c122ec7fafbd087d74/C of 9218423c208ee1c122ec7fafbd087d74 into 10d7d9d2f9d44880829f506d134c8c68(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:31,712 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:31,712 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74., storeName=9218423c208ee1c122ec7fafbd087d74/C, priority=13, startTime=1731806910865; duration=0sec 2024-11-17T01:28:31,712 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:31,712 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9218423c208ee1c122ec7fafbd087d74:C 2024-11-17T01:28:31,826 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/23a25c7b60af4e72bbd6ff6baeb5355b 2024-11-17T01:28:31,838 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/4b859827bbac4eaba283be74def26c7a is 50, key is test_row_0/B:col10/1731806909231/Put/seqid=0 2024-11-17T01:28:31,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742235_1411 (size=12151) 2024-11-17T01:28:32,244 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/4b859827bbac4eaba283be74def26c7a 2024-11-17T01:28:32,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/1e3ee6a2a4054ec38a39bff6f8590a42 is 50, key is test_row_0/C:col10/1731806909231/Put/seqid=0 2024-11-17T01:28:32,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742236_1412 (size=12151) 2024-11-17T01:28:32,664 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/1e3ee6a2a4054ec38a39bff6f8590a42 2024-11-17T01:28:32,674 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/23a25c7b60af4e72bbd6ff6baeb5355b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/23a25c7b60af4e72bbd6ff6baeb5355b 2024-11-17T01:28:32,678 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/23a25c7b60af4e72bbd6ff6baeb5355b, entries=150, sequenceid=250, filesize=30.4 K 2024-11-17T01:28:32,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/4b859827bbac4eaba283be74def26c7a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/4b859827bbac4eaba283be74def26c7a 2024-11-17T01:28:32,683 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/4b859827bbac4eaba283be74def26c7a, entries=150, sequenceid=250, filesize=11.9 K 2024-11-17T01:28:32,684 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/1e3ee6a2a4054ec38a39bff6f8590a42 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e3ee6a2a4054ec38a39bff6f8590a42 2024-11-17T01:28:32,687 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e3ee6a2a4054ec38a39bff6f8590a42, entries=150, sequenceid=250, filesize=11.9 K 2024-11-17T01:28:32,688 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=33.54 KB/34350 for 9218423c208ee1c122ec7fafbd087d74 in 1686ms, sequenceid=250, compaction requested=false 2024-11-17T01:28:32,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:32,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 
2024-11-17T01:28:32,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-17T01:28:32,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-17T01:28:32,690 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-17T01:28:32,690 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.5250 sec 2024-11-17T01:28:32,691 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 3.5280 sec 2024-11-17T01:28:33,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-17T01:28:33,270 INFO [Thread-1553 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 41 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 45 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 43 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 37 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2402 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7206 rows 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2420 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7260 rows 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2414 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7242 rows 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2414 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7242 rows 2024-11-17T01:28:33,270 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2416 2024-11-17T01:28:33,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7248 rows 2024-11-17T01:28:33,271 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-17T01:28:33,271 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34becda3 to 127.0.0.1:63898 2024-11-17T01:28:33,271 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:33,277 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of 
TestAcidGuarantees 2024-11-17T01:28:33,278 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-17T01:28:33,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:33,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-17T01:28:33,281 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806913281"}]},"ts":"1731806913281"} 2024-11-17T01:28:33,282 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-17T01:28:33,290 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-17T01:28:33,290 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-17T01:28:33,291 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, UNASSIGN}] 2024-11-17T01:28:33,292 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, UNASSIGN 2024-11-17T01:28:33,292 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=9218423c208ee1c122ec7fafbd087d74, regionState=CLOSING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:33,293 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-17T01:28:33,293 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; CloseRegionProcedure 9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:28:33,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-17T01:28:33,444 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:33,445 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(124): Close 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:33,445 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-17T01:28:33,445 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1681): Closing 9218423c208ee1c122ec7fafbd087d74, disabling compactions & flushes 2024-11-17T01:28:33,445 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] 
regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:33,445 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:33,445 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. after waiting 0 ms 2024-11-17T01:28:33,445 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:33,445 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(2837): Flushing 9218423c208ee1c122ec7fafbd087d74 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-17T01:28:33,445 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=A 2024-11-17T01:28:33,445 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:33,445 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=B 2024-11-17T01:28:33,446 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:33,446 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9218423c208ee1c122ec7fafbd087d74, store=C 2024-11-17T01:28:33,446 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:33,450 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117de0fd81536444e0bac78c2dc08306d2a_9218423c208ee1c122ec7fafbd087d74 is 50, key is test_row_1/A:col10/1731806911369/Put/seqid=0 2024-11-17T01:28:33,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742237_1413 (size=9914) 2024-11-17T01:28:33,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-17T01:28:33,855 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:33,863 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, 
pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117de0fd81536444e0bac78c2dc08306d2a_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117de0fd81536444e0bac78c2dc08306d2a_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:33,864 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/8fc6c786400c4e8cb660f5f7b30fa7c6, store: [table=TestAcidGuarantees family=A region=9218423c208ee1c122ec7fafbd087d74] 2024-11-17T01:28:33,864 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/8fc6c786400c4e8cb660f5f7b30fa7c6 is 175, key is test_row_1/A:col10/1731806911369/Put/seqid=0 2024-11-17T01:28:33,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742238_1414 (size=22561) 2024-11-17T01:28:33,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-17T01:28:34,269 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=261, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/8fc6c786400c4e8cb660f5f7b30fa7c6 2024-11-17T01:28:34,280 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/bd67c83cf6824525b37c72d01a0801b3 is 50, key is test_row_1/B:col10/1731806911369/Put/seqid=0 2024-11-17T01:28:34,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742239_1415 (size=9857) 2024-11-17T01:28:34,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-17T01:28:34,420 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd5a55039b4f46fa8f98dce4b2ba8734, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/07c16f3fa6bf4c679b7c23c2e3a2b815, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/d246f3109e9947b08a219e5e93ed9c40, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4ddb0269630e4c04806b99ba83354312, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c0097d2eca8c43a0b6247ff8f73b0b11, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/742140faded541bebbcccb7091ce62ab, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/85321a334e344970bf7538cab9721a48, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4cf6e4c2860c4d7b9b5e8c25fedeae13, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/a1b266ece137426ea5cb4a269a956ef5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c3ea530a7d5a4281991d8470110d1c11, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3e7b46c3ffee4f19ae3d085e3f4fc1e0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/6d8a4ff529284945afaed872b79c7958, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/818905493189424bb365f418796806df, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/ff1c176a3e2244a89cc4fca4e9ccf290, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd6e8c43cd994e60b5a473d44cda0f9c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3379d67cdc7443cfb319f64fe6d51893] to archive 2024-11-17T01:28:34,422 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:28:34,426 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd5a55039b4f46fa8f98dce4b2ba8734 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd5a55039b4f46fa8f98dce4b2ba8734 2024-11-17T01:28:34,427 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/07c16f3fa6bf4c679b7c23c2e3a2b815 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/07c16f3fa6bf4c679b7c23c2e3a2b815 2024-11-17T01:28:34,428 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/d246f3109e9947b08a219e5e93ed9c40 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/d246f3109e9947b08a219e5e93ed9c40 2024-11-17T01:28:34,429 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4ddb0269630e4c04806b99ba83354312 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4ddb0269630e4c04806b99ba83354312 2024-11-17T01:28:34,430 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c0097d2eca8c43a0b6247ff8f73b0b11 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c0097d2eca8c43a0b6247ff8f73b0b11 2024-11-17T01:28:34,431 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/742140faded541bebbcccb7091ce62ab to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/742140faded541bebbcccb7091ce62ab 2024-11-17T01:28:34,432 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/85321a334e344970bf7538cab9721a48 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/85321a334e344970bf7538cab9721a48 2024-11-17T01:28:34,433 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4cf6e4c2860c4d7b9b5e8c25fedeae13 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/4cf6e4c2860c4d7b9b5e8c25fedeae13 2024-11-17T01:28:34,434 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/a1b266ece137426ea5cb4a269a956ef5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/a1b266ece137426ea5cb4a269a956ef5 2024-11-17T01:28:34,435 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c3ea530a7d5a4281991d8470110d1c11 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/c3ea530a7d5a4281991d8470110d1c11 2024-11-17T01:28:34,435 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3e7b46c3ffee4f19ae3d085e3f4fc1e0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3e7b46c3ffee4f19ae3d085e3f4fc1e0 2024-11-17T01:28:34,436 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/6d8a4ff529284945afaed872b79c7958 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/6d8a4ff529284945afaed872b79c7958 2024-11-17T01:28:34,438 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/818905493189424bb365f418796806df to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/818905493189424bb365f418796806df 2024-11-17T01:28:34,439 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/ff1c176a3e2244a89cc4fca4e9ccf290 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/ff1c176a3e2244a89cc4fca4e9ccf290 2024-11-17T01:28:34,440 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd6e8c43cd994e60b5a473d44cda0f9c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/dd6e8c43cd994e60b5a473d44cda0f9c 2024-11-17T01:28:34,441 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3379d67cdc7443cfb319f64fe6d51893 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/3379d67cdc7443cfb319f64fe6d51893 2024-11-17T01:28:34,444 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/5c584a97011f421ea41cffed829f967b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/b00a8bc604214b32ba1040a2745438f6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d140cdb488e241b2b6d2a83b39368396, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/79605b47d2de4057a1bbbf19ba2cfb7a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/14f90a04c42e435b98bec5813e939dfe, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/68f1d52d11cd40e4aad08c690322c72a, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/7e4fe7b75e5b4fc3a98bf703ba4e5782, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/1c1b0b1f41ed4eb2b8586dcebe12e23c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/415ff4589c3443ffadd58056e30ce52c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/26d9ceaa89c1417f93b9d37d4e167374, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/09a362418d174bc4a651eb3f3b1bda32, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/9e74eeb69ee748b7b451ee6221514efc, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/074f1a89ced54d35a1d134b6fbdcd186, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/e858e83ce5aa4a0b8f9a1a77bc13a60d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d74004eb112f415d9701a214cfb83555, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/3077980b612745d39b815ebf7e8a9635] to archive 2024-11-17T01:28:34,445 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:28:34,446 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/5c584a97011f421ea41cffed829f967b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/5c584a97011f421ea41cffed829f967b 2024-11-17T01:28:34,447 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/b00a8bc604214b32ba1040a2745438f6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/b00a8bc604214b32ba1040a2745438f6 2024-11-17T01:28:34,448 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d140cdb488e241b2b6d2a83b39368396 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d140cdb488e241b2b6d2a83b39368396 2024-11-17T01:28:34,449 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/79605b47d2de4057a1bbbf19ba2cfb7a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/79605b47d2de4057a1bbbf19ba2cfb7a 2024-11-17T01:28:34,450 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/14f90a04c42e435b98bec5813e939dfe to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/14f90a04c42e435b98bec5813e939dfe 2024-11-17T01:28:34,451 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/68f1d52d11cd40e4aad08c690322c72a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/68f1d52d11cd40e4aad08c690322c72a 2024-11-17T01:28:34,452 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/7e4fe7b75e5b4fc3a98bf703ba4e5782 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/7e4fe7b75e5b4fc3a98bf703ba4e5782 2024-11-17T01:28:34,453 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/1c1b0b1f41ed4eb2b8586dcebe12e23c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/1c1b0b1f41ed4eb2b8586dcebe12e23c 2024-11-17T01:28:34,454 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/415ff4589c3443ffadd58056e30ce52c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/415ff4589c3443ffadd58056e30ce52c 2024-11-17T01:28:34,456 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/26d9ceaa89c1417f93b9d37d4e167374 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/26d9ceaa89c1417f93b9d37d4e167374 2024-11-17T01:28:34,456 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/09a362418d174bc4a651eb3f3b1bda32 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/09a362418d174bc4a651eb3f3b1bda32 2024-11-17T01:28:34,457 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/9e74eeb69ee748b7b451ee6221514efc to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/9e74eeb69ee748b7b451ee6221514efc 2024-11-17T01:28:34,458 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/074f1a89ced54d35a1d134b6fbdcd186 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/074f1a89ced54d35a1d134b6fbdcd186 2024-11-17T01:28:34,460 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/e858e83ce5aa4a0b8f9a1a77bc13a60d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/e858e83ce5aa4a0b8f9a1a77bc13a60d 2024-11-17T01:28:34,461 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d74004eb112f415d9701a214cfb83555 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/d74004eb112f415d9701a214cfb83555 2024-11-17T01:28:34,462 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/3077980b612745d39b815ebf7e8a9635 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/3077980b612745d39b815ebf7e8a9635 2024-11-17T01:28:34,464 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/9fa7d4147e834641869580f58176ed9c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f81806a617134dc0a67391e9bf989a96, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f7bc8c49f67f44caa32e38ba31956fde, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cf4881058de142e8a83bfd42a86979d0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/872fd558a9f641079d42a5c5b3503d2b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/e447ff03b70244d1afd0bbdc0edf393e, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ea979d4fec4140c597c9c418cc30fcd7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/77ea4d44c014466d97ee764205de5cf9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/6ab4a1c5338541aca685f3aec0bac3ad, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/387c8411407f4dcdb7a9a40533a04ee8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/a175207fafcc4fe495ff703679af6c49, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ee944049c438463fb55d40d25176f528, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f1e73c2b437941898c3483251c8b273c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cb1eceb810b9491b9d5054442eca5b18, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e7d9eed29d04df48e233b67602f8eb5, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/2acbf266096046b3ab713c900eea5c2a] to archive 2024-11-17T01:28:34,465 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:28:34,467 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/9fa7d4147e834641869580f58176ed9c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/9fa7d4147e834641869580f58176ed9c 2024-11-17T01:28:34,468 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f81806a617134dc0a67391e9bf989a96 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f81806a617134dc0a67391e9bf989a96 2024-11-17T01:28:34,469 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f7bc8c49f67f44caa32e38ba31956fde to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f7bc8c49f67f44caa32e38ba31956fde 2024-11-17T01:28:34,470 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cf4881058de142e8a83bfd42a86979d0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cf4881058de142e8a83bfd42a86979d0 2024-11-17T01:28:34,471 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/872fd558a9f641079d42a5c5b3503d2b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/872fd558a9f641079d42a5c5b3503d2b 2024-11-17T01:28:34,472 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/e447ff03b70244d1afd0bbdc0edf393e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/e447ff03b70244d1afd0bbdc0edf393e 2024-11-17T01:28:34,473 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 
{event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ea979d4fec4140c597c9c418cc30fcd7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ea979d4fec4140c597c9c418cc30fcd7 2024-11-17T01:28:34,474 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/77ea4d44c014466d97ee764205de5cf9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/77ea4d44c014466d97ee764205de5cf9 2024-11-17T01:28:34,476 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/6ab4a1c5338541aca685f3aec0bac3ad to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/6ab4a1c5338541aca685f3aec0bac3ad 2024-11-17T01:28:34,477 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/387c8411407f4dcdb7a9a40533a04ee8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/387c8411407f4dcdb7a9a40533a04ee8 2024-11-17T01:28:34,477 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/a175207fafcc4fe495ff703679af6c49 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/a175207fafcc4fe495ff703679af6c49 2024-11-17T01:28:34,478 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ee944049c438463fb55d40d25176f528 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/ee944049c438463fb55d40d25176f528 2024-11-17T01:28:34,480 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f1e73c2b437941898c3483251c8b273c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/f1e73c2b437941898c3483251c8b273c 2024-11-17T01:28:34,481 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cb1eceb810b9491b9d5054442eca5b18 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cb1eceb810b9491b9d5054442eca5b18 2024-11-17T01:28:34,482 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e7d9eed29d04df48e233b67602f8eb5 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e7d9eed29d04df48e233b67602f8eb5 2024-11-17T01:28:34,483 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/04f7e7347dc7:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/2acbf266096046b3ab713c900eea5c2a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/2acbf266096046b3ab713c900eea5c2a 2024-11-17T01:28:34,686 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/bd67c83cf6824525b37c72d01a0801b3 2024-11-17T01:28:34,695 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/cce1e0433206416fb99adc0e7fd39ac8 is 50, key is test_row_1/C:col10/1731806911369/Put/seqid=0 2024-11-17T01:28:34,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742240_1416 (size=9857) 2024-11-17T01:28:35,100 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/cce1e0433206416fb99adc0e7fd39ac8 2024-11-17T01:28:35,109 
DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/A/8fc6c786400c4e8cb660f5f7b30fa7c6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/8fc6c786400c4e8cb660f5f7b30fa7c6 2024-11-17T01:28:35,113 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/8fc6c786400c4e8cb660f5f7b30fa7c6, entries=100, sequenceid=261, filesize=22.0 K 2024-11-17T01:28:35,114 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/B/bd67c83cf6824525b37c72d01a0801b3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/bd67c83cf6824525b37c72d01a0801b3 2024-11-17T01:28:35,117 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/bd67c83cf6824525b37c72d01a0801b3, entries=100, sequenceid=261, filesize=9.6 K 2024-11-17T01:28:35,118 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/.tmp/C/cce1e0433206416fb99adc0e7fd39ac8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cce1e0433206416fb99adc0e7fd39ac8 2024-11-17T01:28:35,120 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cce1e0433206416fb99adc0e7fd39ac8, entries=100, sequenceid=261, filesize=9.6 K 2024-11-17T01:28:35,121 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 9218423c208ee1c122ec7fafbd087d74 in 1676ms, sequenceid=261, compaction requested=true 2024-11-17T01:28:35,125 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/recovered.edits/264.seqid, newMaxSeqId=264, maxSeqId=4 2024-11-17T01:28:35,125 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 
{event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74. 2024-11-17T01:28:35,125 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1635): Region close journal for 9218423c208ee1c122ec7fafbd087d74: 2024-11-17T01:28:35,127 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(170): Closed 9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,127 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=9218423c208ee1c122ec7fafbd087d74, regionState=CLOSED 2024-11-17T01:28:35,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-17T01:28:35,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseRegionProcedure 9218423c208ee1c122ec7fafbd087d74, server=04f7e7347dc7,37721,1731806791503 in 1.8350 sec 2024-11-17T01:28:35,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-11-17T01:28:35,130 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9218423c208ee1c122ec7fafbd087d74, UNASSIGN in 1.8380 sec 2024-11-17T01:28:35,131 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-17T01:28:35,132 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.8410 sec 2024-11-17T01:28:35,132 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806915132"}]},"ts":"1731806915132"} 2024-11-17T01:28:35,133 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-17T01:28:35,173 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-17T01:28:35,175 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.8960 sec 2024-11-17T01:28:35,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-17T01:28:35,386 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-17T01:28:35,388 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-17T01:28:35,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:35,391 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:35,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): 
Checking to see if procedure is done pid=124 2024-11-17T01:28:35,392 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=124, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:35,395 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,401 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/recovered.edits] 2024-11-17T01:28:35,407 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/23a25c7b60af4e72bbd6ff6baeb5355b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/23a25c7b60af4e72bbd6ff6baeb5355b 2024-11-17T01:28:35,409 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/8fc6c786400c4e8cb660f5f7b30fa7c6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/8fc6c786400c4e8cb660f5f7b30fa7c6 2024-11-17T01:28:35,411 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/bdcfe299740d4dafa3981a374fc913d1 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/A/bdcfe299740d4dafa3981a374fc913d1 2024-11-17T01:28:35,414 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/4b859827bbac4eaba283be74def26c7a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/4b859827bbac4eaba283be74def26c7a 2024-11-17T01:28:35,416 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/bd67c83cf6824525b37c72d01a0801b3 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/bd67c83cf6824525b37c72d01a0801b3 2024-11-17T01:28:35,417 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/c9dc394d32cf4b61b44105c42199d9b8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/B/c9dc394d32cf4b61b44105c42199d9b8 2024-11-17T01:28:35,420 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/10d7d9d2f9d44880829f506d134c8c68 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/10d7d9d2f9d44880829f506d134c8c68 2024-11-17T01:28:35,421 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e3ee6a2a4054ec38a39bff6f8590a42 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/1e3ee6a2a4054ec38a39bff6f8590a42 2024-11-17T01:28:35,423 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cce1e0433206416fb99adc0e7fd39ac8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/C/cce1e0433206416fb99adc0e7fd39ac8 2024-11-17T01:28:35,427 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/recovered.edits/264.seqid to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74/recovered.edits/264.seqid 2024-11-17T01:28:35,428 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,428 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-17T01:28:35,428 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-17T01:28:35,429 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-17T01:28:35,434 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111706fe62a1643c4dcb8731f7955fd04b68_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111706fe62a1643c4dcb8731f7955fd04b68_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,436 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411170f79334c7cde46ceba2595f3d6eaab67_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411170f79334c7cde46ceba2595f3d6eaab67_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,438 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411171c5460d601c84115b3c4d8ab39b83d4d_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411171c5460d601c84115b3c4d8ab39b83d4d_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,440 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172f97b94d9b5749e19f82b81c5e8c0a13_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172f97b94d9b5749e19f82b81c5e8c0a13_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,442 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411174e722df384ba45a680b2f8212658f736_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411174e722df384ba45a680b2f8212658f736_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,443 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111754005f0bd25b4b839a2ab23108a1c0ed_9218423c208ee1c122ec7fafbd087d74 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111754005f0bd25b4b839a2ab23108a1c0ed_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,445 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411176cd1e82904444b4d9f873ce887116fa8_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411176cd1e82904444b4d9f873ce887116fa8_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,447 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411177353fc1f303c4763bd2516f8fa21283d_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411177353fc1f303c4763bd2516f8fa21283d_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,449 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111790388353b2bb4c05b6a6e5ecceeec023_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111790388353b2bb4c05b6a6e5ecceeec023_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,451 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117a7e65c4e31874949982e586cee3288c9_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117a7e65c4e31874949982e586cee3288c9_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,453 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117c4b555dd89584ecca8eaaad7fcbc495f_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117c4b555dd89584ecca8eaaad7fcbc495f_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,455 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117de0fd81536444e0bac78c2dc08306d2a_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117de0fd81536444e0bac78c2dc08306d2a_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,457 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117de884f4cd04543e38d535aff2337161e_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117de884f4cd04543e38d535aff2337161e_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,459 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117ea8de26a076b4ef6a3f051fb485384bd_9218423c208ee1c122ec7fafbd087d74 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117ea8de26a076b4ef6a3f051fb485384bd_9218423c208ee1c122ec7fafbd087d74 2024-11-17T01:28:35,460 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-17T01:28:35,463 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=124, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:35,466 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-17T01:28:35,467 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-17T01:28:35,468 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=124, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:35,468 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-17T01:28:35,469 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731806915468"}]},"ts":"9223372036854775807"} 2024-11-17T01:28:35,470 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-17T01:28:35,470 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9218423c208ee1c122ec7fafbd087d74, NAME => 'TestAcidGuarantees,,1731806887286.9218423c208ee1c122ec7fafbd087d74.', STARTKEY => '', ENDKEY => ''}] 2024-11-17T01:28:35,470 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-17T01:28:35,470 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731806915470"}]},"ts":"9223372036854775807"} 2024-11-17T01:28:35,471 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-17T01:28:35,482 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=124, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:35,483 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 94 msec 2024-11-17T01:28:35,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-17T01:28:35,493 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-17T01:28:35,506 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=239 (was 238) - Thread LEAK? -, OpenFileDescriptor=453 (was 451) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=290 (was 303), ProcessCount=11 (was 11), AvailableMemoryMB=3852 (was 3878) 2024-11-17T01:28:35,515 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=239, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=290, ProcessCount=11, AvailableMemoryMB=3852 2024-11-17T01:28:35,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-17T01:28:35,516 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:28:35,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:35,518 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T01:28:35,518 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:35,518 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 125 2024-11-17T01:28:35,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-17T01:28:35,519 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T01:28:35,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742241_1417 (size=960) 2024-11-17T01:28:35,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-17T01:28:35,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-17T01:28:35,931 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 2024-11-17T01:28:35,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742242_1418 (size=53) 2024-11-17T01:28:36,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-17T01:28:36,345 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:28:36,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 54b0ceeaba8680637a0c22225fbae49b, disabling compactions & flushes 2024-11-17T01:28:36,346 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:36,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:36,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. after waiting 0 ms 2024-11-17T01:28:36,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:36,346 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:36,346 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:36,348 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T01:28:36,348 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731806916348"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731806916348"}]},"ts":"1731806916348"} 2024-11-17T01:28:36,350 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-17T01:28:36,352 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T01:28:36,352 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806916352"}]},"ts":"1731806916352"} 2024-11-17T01:28:36,354 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-17T01:28:36,399 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=54b0ceeaba8680637a0c22225fbae49b, ASSIGN}] 2024-11-17T01:28:36,401 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=54b0ceeaba8680637a0c22225fbae49b, ASSIGN 2024-11-17T01:28:36,402 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=54b0ceeaba8680637a0c22225fbae49b, ASSIGN; state=OFFLINE, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=false 2024-11-17T01:28:36,553 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=54b0ceeaba8680637a0c22225fbae49b, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:36,556 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; OpenRegionProcedure 54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:28:36,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-17T01:28:36,709 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:36,716 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:36,717 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7285): Opening region: {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:28:36,718 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:36,718 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:28:36,718 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7327): checking encryption for 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:36,718 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7330): checking classloading for 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:36,720 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:36,722 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:28:36,722 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 54b0ceeaba8680637a0c22225fbae49b columnFamilyName A 2024-11-17T01:28:36,722 DEBUG [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:36,723 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] regionserver.HStore(327): Store=54b0ceeaba8680637a0c22225fbae49b/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:28:36,723 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:36,724 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:28:36,724 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 54b0ceeaba8680637a0c22225fbae49b columnFamilyName B 2024-11-17T01:28:36,724 DEBUG [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:36,725 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] regionserver.HStore(327): Store=54b0ceeaba8680637a0c22225fbae49b/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:28:36,725 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:36,726 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:28:36,726 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 54b0ceeaba8680637a0c22225fbae49b columnFamilyName C 2024-11-17T01:28:36,726 DEBUG [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:28:36,727 INFO [StoreOpener-54b0ceeaba8680637a0c22225fbae49b-1 {}] regionserver.HStore(327): Store=54b0ceeaba8680637a0c22225fbae49b/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:28:36,727 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:36,727 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:36,728 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:36,729 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T01:28:36,730 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1085): writing seq id for 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:36,732 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T01:28:36,732 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1102): Opened 54b0ceeaba8680637a0c22225fbae49b; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74282185, jitterRate=0.10689081251621246}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:28:36,733 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1001): Region open journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:36,733 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., pid=127, masterSystemTime=1731806916709 2024-11-17T01:28:36,735 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:36,735 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
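The store-open entries above show each of the families A, B and C backed by a CompactingMemStore (compactor=BASIC, 2.00 MB in-memory flush threshold, pipelineThreshold=2), derived from the table-level 'hbase.hregion.compacting.memstore.type' metadata. A minimal sketch of pinning the same policy on a single family explicitly through the standard Admin API, assuming the table already exists; the class name is illustrative.

// Sketch only: set the BASIC in-memory compaction policy directly on family 'A'
// instead of relying on the table-level metadata key shown in the log.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class SetBasicInMemoryCompaction {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.modifyColumnFamily(
          TableName.valueOf("TestAcidGuarantees"),
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMaxVersions(1) // keep VERSIONS => '1' from the original descriptor
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // matches compactor=BASIC above
              .build());
    }
  }
}

Either route ends with the region server opening the store as 54b0ceeaba8680637a0c22225fbae49b/A with memstore type=CompactingMemStore, as the HStore(327) lines above record.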
2024-11-17T01:28:36,735 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=54b0ceeaba8680637a0c22225fbae49b, regionState=OPEN, openSeqNum=2, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:36,737 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-17T01:28:36,737 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; OpenRegionProcedure 54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 in 180 msec 2024-11-17T01:28:36,739 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-11-17T01:28:36,739 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=54b0ceeaba8680637a0c22225fbae49b, ASSIGN in 338 msec 2024-11-17T01:28:36,739 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T01:28:36,739 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806916739"}]},"ts":"1731806916739"} 2024-11-17T01:28:36,740 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-17T01:28:36,749 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T01:28:36,751 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2330 sec 2024-11-17T01:28:37,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-17T01:28:37,626 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 125 completed 2024-11-17T01:28:37,629 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x59bd764a to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@238db126 2024-11-17T01:28:37,667 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3512017b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,671 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,674 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59648, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,676 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T01:28:37,677 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40240, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T01:28:37,679 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x022a6e9f to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c60eb7d 2024-11-17T01:28:37,691 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c2253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,691 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-11-17T01:28:37,699 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,700 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-11-17T01:28:37,708 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,709 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-11-17T01:28:37,716 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,717 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-11-17T01:28:37,724 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ed69825 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@34b30c39 2024-11-17T01:28:37,732 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b7f20c4, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,733 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-11-17T01:28:37,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,742 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-11-17T01:28:37,749 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,750 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-11-17T01:28:37,757 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,758 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-11-17T01:28:37,766 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:28:37,774 DEBUG [hconnection-0x21a3cc83-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,774 DEBUG [hconnection-0x30b8cd03-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,775 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59662, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,775 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59652, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,775 DEBUG [hconnection-0x6d7675eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,775 DEBUG 
[hconnection-0x40ca9550-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,776 DEBUG [hconnection-0x4fe69aef-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,776 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,776 DEBUG [hconnection-0x30d2bedf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,776 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59684, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,776 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59682, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,777 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59698, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,777 DEBUG [hconnection-0x3a3bd821-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,778 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:37,783 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:28:37,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:37,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:37,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:37,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:37,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:37,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:37,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806977791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,792 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806977792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806977792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,793 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806977793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,803 DEBUG [hconnection-0x403e55b9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,804 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59720, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,808 DEBUG [hconnection-0x5d9fb4f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,809 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,809 DEBUG [hconnection-0x6c68e272-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:28:37,811 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59730, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:28:37,811 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-17T01:28:37,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806977812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,813 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:37,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-17T01:28:37,813 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:37,813 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:37,819 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/2a6a41d537694b848675ebfeab7e96a8 is 50, key is test_row_0/A:col10/1731806917782/Put/seqid=0 2024-11-17T01:28:37,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742243_1419 (size=12001) 2024-11-17T01:28:37,896 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806977893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806977893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806977893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806977894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-17T01:28:37,915 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:37,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806977913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,964 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:37,965 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-17T01:28:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:37,965 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
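The WARN/DEBUG pairs above show client puts being rejected with RegionTooBusyException ('Over memstore limit=512.0 K') while MemStoreFlusher.0 drains the region, and the master-driven flush (FlushTableProcedure pid=128) failing on the region server with 'Unable to complete flush' because the region is already flushing. A minimal sketch of a writer that backs off and retries when the region reports itself too busy; it assumes client retries are tuned low enough (e.g. via hbase.client.retries.number) for the exception to reach the caller instead of being retried inside the client, and the row, qualifier and value literals are illustrative.

// Sketch only: back off and retry a put when the target region is over its
// memstore blocking limit, as in the RegionTooBusyException entries above.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutWithBusyRegionBackoff {
  public static void main(String[] args) throws Exception {
    TableName tableName = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(tableName)) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore drains below the blocking limit
        } catch (RegionTooBusyException busy) {
          if (attempt == 5) {
            throw busy; // give up after a few tries
          }
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 2000); // exponential backoff, capped
        }
      }
    }
  }
}

The 'Client=jenkins//172.17.0.2 flush TestAcidGuarantees' request logged by HMaster at 01:28:37,811 is the server-side trace of a client-issued Admin#flush(TableName); the FlushRegionCallable failures that follow are the region server declining that request because the MemStoreFlusher-triggered flush of 54b0ceeaba8680637a0c22225fbae49b is still in progress.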
2024-11-17T01:28:37,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:37,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806978097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806978098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806978098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806978099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-17T01:28:38,117 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,117 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-17T01:28:38,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:38,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,117 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,117 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806978117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/2a6a41d537694b848675ebfeab7e96a8 2024-11-17T01:28:38,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/acb1eaea2a9d467f91512c8255c8f686 is 50, key is test_row_0/B:col10/1731806917782/Put/seqid=0 2024-11-17T01:28:38,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742244_1420 (size=12001) 2024-11-17T01:28:38,269 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-17T01:28:38,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:38,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:38,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806978403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806978403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806978403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,408 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806978404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-17T01:28:38,421 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,421 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-17T01:28:38,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:38,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:38,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806978421, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,574 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-17T01:28:38,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:38,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:38,575 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,659 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/acb1eaea2a9d467f91512c8255c8f686 2024-11-17T01:28:38,676 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/2b3426dd65cf443aa90892b07f93386f is 50, key is test_row_0/C:col10/1731806917782/Put/seqid=0 2024-11-17T01:28:38,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742245_1421 (size=12001) 2024-11-17T01:28:38,727 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,727 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-17T01:28:38,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
as already flushing 2024-11-17T01:28:38,727 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,727 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,879 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,880 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-17T01:28:38,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:38,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:38,880 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:38,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806978908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806978910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,913 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806978911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,914 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806978912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:38,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-17T01:28:38,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:38,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806978929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:39,032 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:39,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-17T01:28:39,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:39,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:39,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:39,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
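The entries above show the region server rejecting writes with RegionTooBusyException while a flush of 54b0ceeaba8680637a0c22225fbae49b is still in progress: checkResources() blocks puts once the per-region memstore passes the blocking limit (512.0 K here), and the concurrent FlushRegionCallable for pid=129 fails with "Unable to complete flush ... as already flushing". In stock HBase that blocking limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, values this test presumably sets very low. The following is only a minimal client-side sketch of retrying such writes with backoff; the row, column family, and timing values are assumptions for illustration rather than taken from the log, and depending on client retry settings the exception may surface wrapped in a RetriesExhaustedException because the standard HBase client already performs similar retries internally.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryOnBusyRegion {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Hypothetical cell; the log's writes target families A/B/C with qualifier col10.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5, 200L);
    }
  }

  // Retry a put when the region reports it is over its memstore blocking limit.
  static void putWithBackoff(Table table, Put put, int maxAttempts, long baseSleepMs)
      throws IOException, InterruptedException {
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;                              // write accepted
      } catch (RegionTooBusyException e) {   // server-side "Over memstore limit=..." rejection
        if (attempt >= maxAttempts) {
          throw e;                           // give up after maxAttempts tries
        }
        Thread.sleep(baseSleepMs * attempt); // linear backoff while the flush catches up
      }
    }
  }
}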
2024-11-17T01:28:39,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:39,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:39,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/2b3426dd65cf443aa90892b07f93386f 2024-11-17T01:28:39,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/2a6a41d537694b848675ebfeab7e96a8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2a6a41d537694b848675ebfeab7e96a8 2024-11-17T01:28:39,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2a6a41d537694b848675ebfeab7e96a8, entries=150, sequenceid=15, filesize=11.7 K 2024-11-17T01:28:39,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/acb1eaea2a9d467f91512c8255c8f686 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/acb1eaea2a9d467f91512c8255c8f686 2024-11-17T01:28:39,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/acb1eaea2a9d467f91512c8255c8f686, entries=150, sequenceid=15, filesize=11.7 K 2024-11-17T01:28:39,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/2b3426dd65cf443aa90892b07f93386f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/2b3426dd65cf443aa90892b07f93386f 2024-11-17T01:28:39,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/2b3426dd65cf443aa90892b07f93386f, entries=150, sequenceid=15, filesize=11.7 K 2024-11-17T01:28:39,094 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=140.89 KB/144270 for 54b0ceeaba8680637a0c22225fbae49b in 1312ms, sequenceid=15, compaction requested=false 2024-11-17T01:28:39,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:39,184 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:39,184 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-17T01:28:39,184 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:39,184 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-17T01:28:39,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:39,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:39,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:39,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:39,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:39,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:39,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/66302624b5f14381b1803022b07e970d is 50, key is test_row_0/A:col10/1731806917791/Put/seqid=0 2024-11-17T01:28:39,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742246_1422 (size=12001) 2024-11-17T01:28:39,192 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/66302624b5f14381b1803022b07e970d 2024-11-17T01:28:39,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/bc2993b5a88047f5a4a88c048178df29 is 50, key is test_row_0/B:col10/1731806917791/Put/seqid=0 2024-11-17T01:28:39,200 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742247_1423 (size=12001) 2024-11-17T01:28:39,600 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/bc2993b5a88047f5a4a88c048178df29 2024-11-17T01:28:39,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/be2ed07c5bec455d8e872f0e5f31dac7 is 50, key is test_row_0/C:col10/1731806917791/Put/seqid=0 2024-11-17T01:28:39,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742248_1424 (size=12001) 2024-11-17T01:28:39,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:39,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:39,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-17T01:28:39,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:39,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806979954, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:39,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806979961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:39,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806979961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:39,967 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:39,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806979961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:39,968 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:39,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806979962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,009 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/be2ed07c5bec455d8e872f0e5f31dac7 2024-11-17T01:28:40,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/66302624b5f14381b1803022b07e970d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/66302624b5f14381b1803022b07e970d 2024-11-17T01:28:40,016 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/66302624b5f14381b1803022b07e970d, entries=150, sequenceid=39, filesize=11.7 K 2024-11-17T01:28:40,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/bc2993b5a88047f5a4a88c048178df29 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bc2993b5a88047f5a4a88c048178df29 2024-11-17T01:28:40,020 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bc2993b5a88047f5a4a88c048178df29, entries=150, sequenceid=39, filesize=11.7 K 2024-11-17T01:28:40,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/be2ed07c5bec455d8e872f0e5f31dac7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/be2ed07c5bec455d8e872f0e5f31dac7 2024-11-17T01:28:40,023 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/be2ed07c5bec455d8e872f0e5f31dac7, entries=150, sequenceid=39, filesize=11.7 K 2024-11-17T01:28:40,024 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for 54b0ceeaba8680637a0c22225fbae49b in 840ms, sequenceid=39, compaction requested=false 2024-11-17T01:28:40,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:40,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
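The entries above correspond to the flush requested by FlushTableProcedure pid=128: each column family (A, B, C) is written to a .tmp HFile, committed into its store directory, and the region reports "Finished flush ... sequenceid=39". A flush like this can be requested explicitly through the Admin API; the sketch below is a minimal illustration, where the table name comes from the log and the configuration is assumed to point at the test cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml for the test cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; in this log that request
      // appears as a FlushTableProcedure with a per-region FlushRegionProcedure child.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}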
2024-11-17T01:28:40,024 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129 2024-11-17T01:28:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=129 2024-11-17T01:28:40,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-17T01:28:40,026 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2120 sec 2024-11-17T01:28:40,027 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 2.2160 sec 2024-11-17T01:28:40,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:40,066 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-17T01:28:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:40,066 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:40,070 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/90de21b5096e43c582e29d4be60b5a9d is 50, key is test_row_0/A:col10/1731806919961/Put/seqid=0 2024-11-17T01:28:40,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742249_1425 (size=14341) 2024-11-17T01:28:40,094 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806980088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806980088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,100 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806980094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806980093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,100 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806980094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806980195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,201 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806980196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,206 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806980201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806980201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806980201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806980402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806980403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806980408, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806980409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806980409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,475 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/90de21b5096e43c582e29d4be60b5a9d 2024-11-17T01:28:40,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/465f8e9e86e145e7858f810aef551e24 is 50, key is test_row_0/B:col10/1731806919961/Put/seqid=0 2024-11-17T01:28:40,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742250_1426 (size=12001) 2024-11-17T01:28:40,651 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-17T01:28:40,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806980707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806980709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806980713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806980714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:40,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806980715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:40,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/465f8e9e86e145e7858f810aef551e24 2024-11-17T01:28:40,891 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/ddef502b33244f60a0cbeb3bc2fb4403 is 50, key is test_row_0/C:col10/1731806919961/Put/seqid=0 2024-11-17T01:28:40,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742251_1427 (size=12001) 2024-11-17T01:28:41,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:41,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806981211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:41,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:41,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806981218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:41,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:41,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806981219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:41,223 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:41,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806981220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:41,229 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:41,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806981225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:41,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/ddef502b33244f60a0cbeb3bc2fb4403 2024-11-17T01:28:41,345 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/90de21b5096e43c582e29d4be60b5a9d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/90de21b5096e43c582e29d4be60b5a9d 2024-11-17T01:28:41,348 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/90de21b5096e43c582e29d4be60b5a9d, entries=200, sequenceid=54, filesize=14.0 K 2024-11-17T01:28:41,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/465f8e9e86e145e7858f810aef551e24 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/465f8e9e86e145e7858f810aef551e24 2024-11-17T01:28:41,351 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/465f8e9e86e145e7858f810aef551e24, entries=150, sequenceid=54, filesize=11.7 K 2024-11-17T01:28:41,352 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/ddef502b33244f60a0cbeb3bc2fb4403 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ddef502b33244f60a0cbeb3bc2fb4403 2024-11-17T01:28:41,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ddef502b33244f60a0cbeb3bc2fb4403, entries=150, sequenceid=54, filesize=11.7 K 2024-11-17T01:28:41,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 54b0ceeaba8680637a0c22225fbae49b in 1289ms, sequenceid=54, compaction requested=true 2024-11-17T01:28:41,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:41,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:41,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:41,356 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:41,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:41,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:41,356 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:41,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:41,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:41,357 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:41,357 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:41,357 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/A is initiating minor compaction (all files) 2024-11-17T01:28:41,357 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/B is initiating minor compaction (all files) 2024-11-17T01:28:41,357 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/A in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:41,357 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/B in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:41,357 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/acb1eaea2a9d467f91512c8255c8f686, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bc2993b5a88047f5a4a88c048178df29, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/465f8e9e86e145e7858f810aef551e24] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=35.2 K 2024-11-17T01:28:41,357 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2a6a41d537694b848675ebfeab7e96a8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/66302624b5f14381b1803022b07e970d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/90de21b5096e43c582e29d4be60b5a9d] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=37.4 K 2024-11-17T01:28:41,357 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2a6a41d537694b848675ebfeab7e96a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731806917780 2024-11-17T01:28:41,357 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting acb1eaea2a9d467f91512c8255c8f686, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731806917780 2024-11-17T01:28:41,357 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting bc2993b5a88047f5a4a88c048178df29, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1731806917788 2024-11-17T01:28:41,357 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting 66302624b5f14381b1803022b07e970d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1731806917788 2024-11-17T01:28:41,358 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 465f8e9e86e145e7858f810aef551e24, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731806919960 2024-11-17T01:28:41,358 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 90de21b5096e43c582e29d4be60b5a9d, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731806919960 2024-11-17T01:28:41,363 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#A#compaction#357 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:41,363 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/7185447f86334a9b8551c0ac99c35490 is 50, key is test_row_0/A:col10/1731806919961/Put/seqid=0 2024-11-17T01:28:41,366 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#B#compaction#358 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:41,366 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3d54752772b04563a771f700c1b2f41b is 50, key is test_row_0/B:col10/1731806919961/Put/seqid=0 2024-11-17T01:28:41,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742252_1428 (size=12104) 2024-11-17T01:28:41,376 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/7185447f86334a9b8551c0ac99c35490 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/7185447f86334a9b8551c0ac99c35490 2024-11-17T01:28:41,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742253_1429 (size=12104) 2024-11-17T01:28:41,381 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/A of 54b0ceeaba8680637a0c22225fbae49b into 7185447f86334a9b8551c0ac99c35490(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:41,381 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:41,381 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/A, priority=13, startTime=1731806921356; duration=0sec 2024-11-17T01:28:41,382 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:41,382 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:A 2024-11-17T01:28:41,382 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:41,383 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:41,383 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/C is initiating minor compaction (all files) 2024-11-17T01:28:41,383 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/C in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:41,383 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/2b3426dd65cf443aa90892b07f93386f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/be2ed07c5bec455d8e872f0e5f31dac7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ddef502b33244f60a0cbeb3bc2fb4403] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=35.2 K 2024-11-17T01:28:41,383 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b3426dd65cf443aa90892b07f93386f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731806917780 2024-11-17T01:28:41,383 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting be2ed07c5bec455d8e872f0e5f31dac7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1731806917788 2024-11-17T01:28:41,384 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddef502b33244f60a0cbeb3bc2fb4403, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731806919960 2024-11-17T01:28:41,385 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3d54752772b04563a771f700c1b2f41b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d54752772b04563a771f700c1b2f41b 2024-11-17T01:28:41,390 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#C#compaction#359 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:41,390 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/49ce8e7df9db473f82ab8acf243a7f39 is 50, key is test_row_0/C:col10/1731806919961/Put/seqid=0 2024-11-17T01:28:41,391 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/B of 54b0ceeaba8680637a0c22225fbae49b into 3d54752772b04563a771f700c1b2f41b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:41,391 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:41,391 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/B, priority=13, startTime=1731806921356; duration=0sec 2024-11-17T01:28:41,391 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:41,391 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:B 2024-11-17T01:28:41,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742254_1430 (size=12104) 2024-11-17T01:28:41,799 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/49ce8e7df9db473f82ab8acf243a7f39 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/49ce8e7df9db473f82ab8acf243a7f39 2024-11-17T01:28:41,802 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/C of 54b0ceeaba8680637a0c22225fbae49b into 49ce8e7df9db473f82ab8acf243a7f39(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:41,802 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:41,802 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/C, priority=13, startTime=1731806921356; duration=0sec 2024-11-17T01:28:41,803 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:41,803 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:C 2024-11-17T01:28:41,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-17T01:28:41,917 INFO [Thread-1892 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-17T01:28:41,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:41,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-17T01:28:41,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-17T01:28:41,919 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:41,919 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:41,919 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:42,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-17T01:28:42,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,071 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-17T01:28:42,071 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:42,072 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-17T01:28:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:42,072 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:42,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/39d462e020ce48c7bc24a60973ff51db is 50, key is test_row_0/A:col10/1731806920087/Put/seqid=0 2024-11-17T01:28:42,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742255_1431 (size=12001) 2024-11-17T01:28:42,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-17T01:28:42,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:42,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806982236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806982238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,246 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806982239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806982241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806982243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806982344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806982346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,350 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806982347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,351 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806982348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806982352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,479 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/39d462e020ce48c7bc24a60973ff51db 2024-11-17T01:28:42,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/d9391cb441c1457cb18349e49c2d2622 is 50, key is test_row_0/B:col10/1731806920087/Put/seqid=0 2024-11-17T01:28:42,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742256_1432 (size=12001) 2024-11-17T01:28:42,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-17T01:28:42,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806982551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806982551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806982552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806982553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806982558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,858 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806982854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806982857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806982862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,863 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806982862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:42,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806982864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:42,889 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/d9391cb441c1457cb18349e49c2d2622 2024-11-17T01:28:42,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/0f18ad1799df4929924cc53bcf6abba9 is 50, key is test_row_0/C:col10/1731806920087/Put/seqid=0 2024-11-17T01:28:42,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742257_1433 (size=12001) 2024-11-17T01:28:43,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-17T01:28:43,298 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/0f18ad1799df4929924cc53bcf6abba9 2024-11-17T01:28:43,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/39d462e020ce48c7bc24a60973ff51db as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/39d462e020ce48c7bc24a60973ff51db 2024-11-17T01:28:43,304 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/39d462e020ce48c7bc24a60973ff51db, entries=150, sequenceid=78, filesize=11.7 K 2024-11-17T01:28:43,305 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/d9391cb441c1457cb18349e49c2d2622 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/d9391cb441c1457cb18349e49c2d2622 2024-11-17T01:28:43,307 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/d9391cb441c1457cb18349e49c2d2622, entries=150, sequenceid=78, filesize=11.7 K 2024-11-17T01:28:43,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/0f18ad1799df4929924cc53bcf6abba9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0f18ad1799df4929924cc53bcf6abba9 2024-11-17T01:28:43,310 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0f18ad1799df4929924cc53bcf6abba9, entries=150, sequenceid=78, filesize=11.7 K 2024-11-17T01:28:43,311 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 54b0ceeaba8680637a0c22225fbae49b in 1240ms, sequenceid=78, compaction requested=false 2024-11-17T01:28:43,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:43,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:43,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-17T01:28:43,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-17T01:28:43,313 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-17T01:28:43,313 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3930 sec 2024-11-17T01:28:43,313 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.3950 sec 2024-11-17T01:28:43,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:43,364 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-17T01:28:43,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:43,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:43,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:43,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:43,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:43,364 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:43,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/324d6f67f71244fc87f29cea7e4cc04a is 50, key is test_row_0/A:col10/1731806923363/Put/seqid=0 2024-11-17T01:28:43,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742258_1434 (size=14341) 2024-11-17T01:28:43,388 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806983383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806983385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806983386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806983386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,393 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806983387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806983489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806983492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806983492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806983493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,496 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806983493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806983695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806983697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806983697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806983697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:43,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806983698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:43,772 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/324d6f67f71244fc87f29cea7e4cc04a 2024-11-17T01:28:43,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/496bae3561d249c189479d6a7e4f55a4 is 50, key is test_row_0/B:col10/1731806923363/Put/seqid=0 2024-11-17T01:28:43,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742259_1435 (size=12001) 2024-11-17T01:28:44,004 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806984001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806984001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806984003, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806984004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,008 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806984005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-17T01:28:44,022 INFO [Thread-1892 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-17T01:28:44,023 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:44,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-17T01:28:44,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-17T01:28:44,024 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:44,024 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:44,024 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:44,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=132 2024-11-17T01:28:44,176 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,176 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-17T01:28:44,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:44,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:44,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:44,176 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:44,176 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:44,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:44,181 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/496bae3561d249c189479d6a7e4f55a4 2024-11-17T01:28:44,186 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/3054d9fc644142fb9843750d3e20403d is 50, key is test_row_0/C:col10/1731806923363/Put/seqid=0 2024-11-17T01:28:44,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742260_1436 (size=12001) 2024-11-17T01:28:44,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-17T01:28:44,328 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-17T01:28:44,328 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:44,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:44,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:44,329 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:44,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:44,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:44,480 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,481 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-17T01:28:44,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:44,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:44,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:44,481 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:44,481 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:44,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:44,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806984509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806984509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806984510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,515 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806984510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:44,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806984510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,590 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/3054d9fc644142fb9843750d3e20403d 2024-11-17T01:28:44,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/324d6f67f71244fc87f29cea7e4cc04a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/324d6f67f71244fc87f29cea7e4cc04a 2024-11-17T01:28:44,595 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/324d6f67f71244fc87f29cea7e4cc04a, entries=200, sequenceid=94, filesize=14.0 K 2024-11-17T01:28:44,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/496bae3561d249c189479d6a7e4f55a4 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/496bae3561d249c189479d6a7e4f55a4 2024-11-17T01:28:44,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/496bae3561d249c189479d6a7e4f55a4, entries=150, sequenceid=94, filesize=11.7 K 2024-11-17T01:28:44,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/3054d9fc644142fb9843750d3e20403d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/3054d9fc644142fb9843750d3e20403d 2024-11-17T01:28:44,601 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/3054d9fc644142fb9843750d3e20403d, entries=150, sequenceid=94, filesize=11.7 K 2024-11-17T01:28:44,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 54b0ceeaba8680637a0c22225fbae49b in 1238ms, sequenceid=94, compaction requested=true 2024-11-17T01:28:44,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:44,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:44,602 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:44,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:44,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:44,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:44,602 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:44,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:44,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:44,603 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:44,603 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:44,603 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/B is initiating minor compaction (all files) 2024-11-17T01:28:44,603 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/A is initiating minor compaction (all files) 2024-11-17T01:28:44,603 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/B in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:44,603 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/A in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:44,603 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/7185447f86334a9b8551c0ac99c35490, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/39d462e020ce48c7bc24a60973ff51db, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/324d6f67f71244fc87f29cea7e4cc04a] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=37.5 K 2024-11-17T01:28:44,603 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d54752772b04563a771f700c1b2f41b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/d9391cb441c1457cb18349e49c2d2622, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/496bae3561d249c189479d6a7e4f55a4] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=35.3 K 2024-11-17T01:28:44,604 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7185447f86334a9b8551c0ac99c35490, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731806919960 2024-11-17T01:28:44,604 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d54752772b04563a771f700c1b2f41b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731806919960 2024-11-17T01:28:44,604 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting d9391cb441c1457cb18349e49c2d2622, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731806920087 2024-11-17T01:28:44,604 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting 39d462e020ce48c7bc24a60973ff51db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731806920087 2024-11-17T01:28:44,604 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 496bae3561d249c189479d6a7e4f55a4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731806922240 2024-11-17T01:28:44,604 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 324d6f67f71244fc87f29cea7e4cc04a, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731806922238 2024-11-17T01:28:44,609 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#A#compaction#366 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:44,609 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/8791b110f3ff4a03afebc409d78c1112 is 50, key is test_row_0/A:col10/1731806923363/Put/seqid=0 2024-11-17T01:28:44,612 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#B#compaction#367 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:44,612 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/55c5e8894c584c6f979caf9d2a4c17d1 is 50, key is test_row_0/B:col10/1731806923363/Put/seqid=0 2024-11-17T01:28:44,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742262_1438 (size=12207) 2024-11-17T01:28:44,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742261_1437 (size=12207) 2024-11-17T01:28:44,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-17T01:28:44,632 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:44,633 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-17T01:28:44,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:44,633 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-17T01:28:44,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:44,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:44,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:44,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:44,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:44,633 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:44,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/cf0694637fe44b9fa555618690213208 is 50, key is test_row_0/A:col10/1731806923384/Put/seqid=0 2024-11-17T01:28:44,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742263_1439 (size=12001) 2024-11-17T01:28:45,023 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/8791b110f3ff4a03afebc409d78c1112 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/8791b110f3ff4a03afebc409d78c1112 2024-11-17T01:28:45,023 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/55c5e8894c584c6f979caf9d2a4c17d1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/55c5e8894c584c6f979caf9d2a4c17d1 2024-11-17T01:28:45,026 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/B of 54b0ceeaba8680637a0c22225fbae49b into 55c5e8894c584c6f979caf9d2a4c17d1(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:45,026 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/A of 54b0ceeaba8680637a0c22225fbae49b into 8791b110f3ff4a03afebc409d78c1112(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:45,026 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:45,026 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:45,026 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/B, priority=13, startTime=1731806924602; duration=0sec 2024-11-17T01:28:45,026 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/A, priority=13, startTime=1731806924602; duration=0sec 2024-11-17T01:28:45,027 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:45,027 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:45,027 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:B 2024-11-17T01:28:45,027 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:A 2024-11-17T01:28:45,027 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:45,027 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:45,027 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/C is initiating minor compaction (all files) 2024-11-17T01:28:45,027 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/C in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:45,027 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/49ce8e7df9db473f82ab8acf243a7f39, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0f18ad1799df4929924cc53bcf6abba9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/3054d9fc644142fb9843750d3e20403d] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=35.3 K 2024-11-17T01:28:45,028 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 49ce8e7df9db473f82ab8acf243a7f39, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731806919960 2024-11-17T01:28:45,028 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f18ad1799df4929924cc53bcf6abba9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731806920087 2024-11-17T01:28:45,028 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 3054d9fc644142fb9843750d3e20403d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731806922240 2024-11-17T01:28:45,033 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#C#compaction#369 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:45,033 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/570d885ad4ad4958a999e832d54688fd is 50, key is test_row_0/C:col10/1731806923363/Put/seqid=0 2024-11-17T01:28:45,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742264_1440 (size=12207) 2024-11-17T01:28:45,039 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/570d885ad4ad4958a999e832d54688fd as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/570d885ad4ad4958a999e832d54688fd 2024-11-17T01:28:45,040 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/cf0694637fe44b9fa555618690213208 2024-11-17T01:28:45,044 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/C of 54b0ceeaba8680637a0c22225fbae49b into 570d885ad4ad4958a999e832d54688fd(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:45,044 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:45,044 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/C, priority=13, startTime=1731806924602; duration=0sec 2024-11-17T01:28:45,044 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:45,044 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:C 2024-11-17T01:28:45,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/beab0bce07e248949fdb836be6e09678 is 50, key is test_row_0/B:col10/1731806923384/Put/seqid=0 2024-11-17T01:28:45,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742265_1441 (size=12001) 2024-11-17T01:28:45,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-17T01:28:45,449 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/beab0bce07e248949fdb836be6e09678 2024-11-17T01:28:45,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/5bfc38c64b8e42fda96393b53631fe47 is 50, key is test_row_0/C:col10/1731806923384/Put/seqid=0 2024-11-17T01:28:45,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742266_1442 (size=12001) 2024-11-17T01:28:45,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:45,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:45,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806985529, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806985532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806985533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,542 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806985534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,543 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806985534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806985636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806985639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806985639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,647 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806985643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,648 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806985644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806985840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806985844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806985844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806985847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,853 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:45,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806985849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:45,858 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/5bfc38c64b8e42fda96393b53631fe47 2024-11-17T01:28:45,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/cf0694637fe44b9fa555618690213208 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/cf0694637fe44b9fa555618690213208 2024-11-17T01:28:45,864 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/cf0694637fe44b9fa555618690213208, entries=150, sequenceid=114, filesize=11.7 K 2024-11-17T01:28:45,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/beab0bce07e248949fdb836be6e09678 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/beab0bce07e248949fdb836be6e09678 2024-11-17T01:28:45,868 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/beab0bce07e248949fdb836be6e09678, entries=150, sequenceid=114, filesize=11.7 K 2024-11-17T01:28:45,868 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/5bfc38c64b8e42fda96393b53631fe47 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/5bfc38c64b8e42fda96393b53631fe47 2024-11-17T01:28:45,871 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/5bfc38c64b8e42fda96393b53631fe47, entries=150, sequenceid=114, filesize=11.7 K 2024-11-17T01:28:45,872 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 54b0ceeaba8680637a0c22225fbae49b in 1238ms, sequenceid=114, compaction requested=false 2024-11-17T01:28:45,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:45,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:45,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-17T01:28:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-17T01:28:45,873 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-17T01:28:45,874 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8480 sec 2024-11-17T01:28:45,875 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.8510 sec 2024-11-17T01:28:46,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-17T01:28:46,127 INFO [Thread-1892 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-17T01:28:46,128 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-17T01:28:46,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-17T01:28:46,129 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:46,130 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:46,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:46,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:46,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-17T01:28:46,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:46,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:46,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:46,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:46,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:46,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:46,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/1261922bb84e418e9c76558e13c6308f is 50, key is test_row_0/A:col10/1731806925531/Put/seqid=0 2024-11-17T01:28:46,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742267_1443 (size=14541) 2024-11-17T01:28:46,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/1261922bb84e418e9c76558e13c6308f 2024-11-17T01:28:46,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/1b30a440d70e4d1aa250cb2fca135f06 is 50, key is test_row_0/B:col10/1731806925531/Put/seqid=0 2024-11-17T01:28:46,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742268_1444 (size=12151) 2024-11-17T01:28:46,169 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=134 (bloomFilter=true), 
to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/1b30a440d70e4d1aa250cb2fca135f06 2024-11-17T01:28:46,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/f2f352b0b82541abbc4e35c5fd8f1c4c is 50, key is test_row_0/C:col10/1731806925531/Put/seqid=0 2024-11-17T01:28:46,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806986169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742269_1445 (size=12151) 2024-11-17T01:28:46,181 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806986171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806986172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,182 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806986175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806986176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-17T01:28:46,281 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-17T01:28:46,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:46,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:46,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:46,282 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:46,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806986277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:46,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806986283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806986283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,288 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806986283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806986284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-17T01:28:46,433 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-17T01:28:46,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:46,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:46,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:46,434 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:28:46,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:46,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:46,486 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806986483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,494 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806986489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806986490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806986490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806986490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,579 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=134 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/f2f352b0b82541abbc4e35c5fd8f1c4c 2024-11-17T01:28:46,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/1261922bb84e418e9c76558e13c6308f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/1261922bb84e418e9c76558e13c6308f 2024-11-17T01:28:46,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/1261922bb84e418e9c76558e13c6308f, entries=200, sequenceid=134, filesize=14.2 K 2024-11-17T01:28:46,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/1b30a440d70e4d1aa250cb2fca135f06 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/1b30a440d70e4d1aa250cb2fca135f06 2024-11-17T01:28:46,588 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/1b30a440d70e4d1aa250cb2fca135f06, entries=150, sequenceid=134, filesize=11.9 K 2024-11-17T01:28:46,588 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/f2f352b0b82541abbc4e35c5fd8f1c4c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/f2f352b0b82541abbc4e35c5fd8f1c4c 2024-11-17T01:28:46,591 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/f2f352b0b82541abbc4e35c5fd8f1c4c, entries=150, sequenceid=134, filesize=11.9 K 2024-11-17T01:28:46,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 54b0ceeaba8680637a0c22225fbae49b in 443ms, sequenceid=134, compaction requested=true 2024-11-17T01:28:46,592 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:46,592 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:46,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:46,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:46,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:46,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:46,592 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:46,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:46,592 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:46,592 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38749 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:46,592 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:46,593 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/B is initiating minor compaction (all files) 2024-11-17T01:28:46,593 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/A is initiating minor compaction (all files) 2024-11-17T01:28:46,593 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/A in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:46,593 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/B in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:46,593 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/55c5e8894c584c6f979caf9d2a4c17d1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/beab0bce07e248949fdb836be6e09678, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/1b30a440d70e4d1aa250cb2fca135f06] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=35.5 K 2024-11-17T01:28:46,593 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/8791b110f3ff4a03afebc409d78c1112, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/cf0694637fe44b9fa555618690213208, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/1261922bb84e418e9c76558e13c6308f] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=37.8 K 2024-11-17T01:28:46,593 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 55c5e8894c584c6f979caf9d2a4c17d1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731806922240 2024-11-17T01:28:46,593 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting beab0bce07e248949fdb836be6e09678, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1731806923384 2024-11-17T01:28:46,593 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b30a440d70e4d1aa250cb2fca135f06, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731806925531 2024-11-17T01:28:46,593 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8791b110f3ff4a03afebc409d78c1112, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731806922240 2024-11-17T01:28:46,594 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf0694637fe44b9fa555618690213208, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1731806923384 2024-11-17T01:28:46,594 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1261922bb84e418e9c76558e13c6308f, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731806925531 
2024-11-17T01:28:46,600 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#B#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:46,601 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/78425cc6e8404277a072a55cab1858c2 is 50, key is test_row_0/B:col10/1731806925531/Put/seqid=0 2024-11-17T01:28:46,611 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#A#compaction#376 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:46,612 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/15eea2aeed694cd781d7e9b2ad810bfe is 50, key is test_row_0/A:col10/1731806925531/Put/seqid=0 2024-11-17T01:28:46,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742270_1446 (size=12459) 2024-11-17T01:28:46,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742271_1447 (size=12459) 2024-11-17T01:28:46,627 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,627 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-17T01:28:46,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:46,627 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-17T01:28:46,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:46,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:46,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:46,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:46,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:46,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:46,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/9595770f6c314ae4831ed9d740427866 is 50, key is test_row_0/A:col10/1731806926175/Put/seqid=0 2024-11-17T01:28:46,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742272_1448 (size=12151) 2024-11-17T01:28:46,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-17T01:28:46,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:46,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:46,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806986809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806986810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,817 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806986813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806986813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806986816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806986916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806986917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806986917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806986921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:46,926 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:46,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806986921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,016 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/78425cc6e8404277a072a55cab1858c2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/78425cc6e8404277a072a55cab1858c2 2024-11-17T01:28:47,019 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/B of 54b0ceeaba8680637a0c22225fbae49b into 78425cc6e8404277a072a55cab1858c2(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:47,019 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:47,019 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/B, priority=13, startTime=1731806926592; duration=0sec 2024-11-17T01:28:47,019 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:47,019 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:B 2024-11-17T01:28:47,019 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:47,021 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:47,021 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/C is initiating minor compaction (all files) 2024-11-17T01:28:47,021 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/C in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:47,021 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/570d885ad4ad4958a999e832d54688fd, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/5bfc38c64b8e42fda96393b53631fe47, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/f2f352b0b82541abbc4e35c5fd8f1c4c] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=35.5 K 2024-11-17T01:28:47,021 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 570d885ad4ad4958a999e832d54688fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731806922240 2024-11-17T01:28:47,022 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 5bfc38c64b8e42fda96393b53631fe47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1731806923384 2024-11-17T01:28:47,022 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f2f352b0b82541abbc4e35c5fd8f1c4c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731806925531 2024-11-17T01:28:47,024 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/15eea2aeed694cd781d7e9b2ad810bfe as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/15eea2aeed694cd781d7e9b2ad810bfe 2024-11-17T01:28:47,027 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/A of 54b0ceeaba8680637a0c22225fbae49b into 15eea2aeed694cd781d7e9b2ad810bfe(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:47,027 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:47,027 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/A, priority=13, startTime=1731806926592; duration=0sec 2024-11-17T01:28:47,027 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:47,027 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:A 2024-11-17T01:28:47,028 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#C#compaction#378 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:47,028 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/33b461cd25f44a13abd9a555e933f598 is 50, key is test_row_0/C:col10/1731806925531/Put/seqid=0 2024-11-17T01:28:47,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742273_1449 (size=12459) 2024-11-17T01:28:47,038 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/9595770f6c314ae4831ed9d740427866 2024-11-17T01:28:47,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3336a35df1a24b8893b30cf108dd4a6a is 50, key is test_row_0/B:col10/1731806926175/Put/seqid=0 2024-11-17T01:28:47,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742274_1450 (size=12151) 2024-11-17T01:28:47,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806987121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806987123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806987124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806987127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806987127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-17T01:28:47,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806987426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,434 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/33b461cd25f44a13abd9a555e933f598 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/33b461cd25f44a13abd9a555e933f598 2024-11-17T01:28:47,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806987430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,435 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806987431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806987432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806987432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,438 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/C of 54b0ceeaba8680637a0c22225fbae49b into 33b461cd25f44a13abd9a555e933f598(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:47,438 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:47,438 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/C, priority=13, startTime=1731806926592; duration=0sec 2024-11-17T01:28:47,438 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:47,438 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:C 2024-11-17T01:28:47,446 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3336a35df1a24b8893b30cf108dd4a6a 2024-11-17T01:28:47,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/0ee05632b99b4017b0ab4911a8d9a7b3 is 50, key is test_row_0/C:col10/1731806926175/Put/seqid=0 2024-11-17T01:28:47,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742275_1451 (size=12151) 2024-11-17T01:28:47,856 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/0ee05632b99b4017b0ab4911a8d9a7b3 2024-11-17T01:28:47,859 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/9595770f6c314ae4831ed9d740427866 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/9595770f6c314ae4831ed9d740427866 2024-11-17T01:28:47,861 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/9595770f6c314ae4831ed9d740427866, entries=150, sequenceid=153, filesize=11.9 K 2024-11-17T01:28:47,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3336a35df1a24b8893b30cf108dd4a6a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3336a35df1a24b8893b30cf108dd4a6a 2024-11-17T01:28:47,865 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3336a35df1a24b8893b30cf108dd4a6a, entries=150, sequenceid=153, filesize=11.9 K 2024-11-17T01:28:47,865 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/0ee05632b99b4017b0ab4911a8d9a7b3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0ee05632b99b4017b0ab4911a8d9a7b3 2024-11-17T01:28:47,868 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0ee05632b99b4017b0ab4911a8d9a7b3, entries=150, sequenceid=153, filesize=11.9 K 2024-11-17T01:28:47,869 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 54b0ceeaba8680637a0c22225fbae49b in 1242ms, sequenceid=153, compaction requested=false 2024-11-17T01:28:47,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:47,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:47,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-17T01:28:47,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-17T01:28:47,871 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-17T01:28:47,871 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7400 sec 2024-11-17T01:28:47,872 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.7430 sec 2024-11-17T01:28:47,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:47,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-17T01:28:47,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:47,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:47,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:47,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:47,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:47,934 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:47,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/056512560f1349d2b6abc0a0320d410e is 50, key is test_row_0/A:col10/1731806926815/Put/seqid=0 2024-11-17T01:28:47,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742276_1452 (size=14541) 2024-11-17T01:28:47,964 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806987957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806987958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806987958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,970 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806987965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:47,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:47,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806987965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806988065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806988065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806988066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806988071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806988072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-17T01:28:48,233 INFO [Thread-1892 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-17T01:28:48,234 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:48,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-17T01:28:48,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-17T01:28:48,235 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:48,235 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:48,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:48,273 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806988269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806988270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,275 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806988271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806988276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806988277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-17T01:28:48,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/056512560f1349d2b6abc0a0320d410e 2024-11-17T01:28:48,349 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/b20564ce97de4737bf1384812f320d10 is 50, key is test_row_0/B:col10/1731806926815/Put/seqid=0 2024-11-17T01:28:48,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742277_1453 (size=12151) 2024-11-17T01:28:48,386 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-17T01:28:48,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:48,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:48,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:48,387 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-17T01:28:48,539 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-17T01:28:48,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:48,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:48,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:48,539 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806988575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806988576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806988576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,586 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806988581, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:48,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806988584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,691 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,691 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-17T01:28:48,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:48,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:48,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:48,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,753 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/b20564ce97de4737bf1384812f320d10 2024-11-17T01:28:48,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/436f8cdb9a8e493e8f82656669e2777c is 50, key is test_row_0/C:col10/1731806926815/Put/seqid=0 2024-11-17T01:28:48,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742278_1454 (size=12151) 2024-11-17T01:28:48,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-17T01:28:48,843 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-17T01:28:48,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:48,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
as already flushing 2024-11-17T01:28:48,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:48,844 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,996 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:48,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-17T01:28:48,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:48,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:48,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:48,996 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:48,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:49,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:49,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806989081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:49,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:49,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806989082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:49,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:49,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806989085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:49,093 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:49,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806989089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:49,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:49,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806989093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:49,148 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:49,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-17T01:28:49,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:49,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:49,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:49,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:49,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:49,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:49,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/436f8cdb9a8e493e8f82656669e2777c 2024-11-17T01:28:49,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/056512560f1349d2b6abc0a0320d410e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/056512560f1349d2b6abc0a0320d410e 2024-11-17T01:28:49,168 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/056512560f1349d2b6abc0a0320d410e, entries=200, sequenceid=174, filesize=14.2 K 2024-11-17T01:28:49,169 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/b20564ce97de4737bf1384812f320d10 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/b20564ce97de4737bf1384812f320d10 2024-11-17T01:28:49,172 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/b20564ce97de4737bf1384812f320d10, entries=150, 
sequenceid=174, filesize=11.9 K
2024-11-17T01:28:49,172 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/436f8cdb9a8e493e8f82656669e2777c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/436f8cdb9a8e493e8f82656669e2777c
2024-11-17T01:28:49,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/436f8cdb9a8e493e8f82656669e2777c, entries=150, sequenceid=174, filesize=11.9 K
2024-11-17T01:28:49,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=107.34 KB/109920 for 54b0ceeaba8680637a0c22225fbae49b in 1242ms, sequenceid=174, compaction requested=true
2024-11-17T01:28:49,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b:
2024-11-17T01:28:49,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:A, priority=-2147483648, current under compaction store size is 1
2024-11-17T01:28:49,177 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-17T01:28:49,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-17T01:28:49,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:B, priority=-2147483648, current under compaction store size is 2
2024-11-17T01:28:49,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-17T01:28:49,177 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-17T01:28:49,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:C, priority=-2147483648, current under compaction store size is 3
2024-11-17T01:28:49,177 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-17T01:28:49,177 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39151 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-17T01:28:49,177 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-17T01:28:49,177 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/A is initiating minor compaction (all files)
2024-11-17T01:28:49,177 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/B is initiating minor compaction (all files)
2024-11-17T01:28:49,177 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/B in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.
2024-11-17T01:28:49,177 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/A in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.
2024-11-17T01:28:49,178 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/78425cc6e8404277a072a55cab1858c2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3336a35df1a24b8893b30cf108dd4a6a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/b20564ce97de4737bf1384812f320d10] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=35.9 K
2024-11-17T01:28:49,178 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/15eea2aeed694cd781d7e9b2ad810bfe, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/9595770f6c314ae4831ed9d740427866, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/056512560f1349d2b6abc0a0320d410e] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=38.2 K
2024-11-17T01:28:49,178 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15eea2aeed694cd781d7e9b2ad810bfe, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731806925531
2024-11-17T01:28:49,178 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 78425cc6e8404277a072a55cab1858c2, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731806925531
2024-11-17T01:28:49,178 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9595770f6c314ae4831ed9d740427866, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1731806926168
2024-11-17T01:28:49,178 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 3336a35df1a24b8893b30cf108dd4a6a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1731806926168
2024-11-17T01:28:49,178 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 056512560f1349d2b6abc0a0320d410e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731806926812
2024-11-17T01:28:49,178 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting b20564ce97de4737bf1384812f320d10, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731806926812
2024-11-17T01:28:49,187 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#B#compaction#384 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-17T01:28:49,187 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#A#compaction#385 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-17T01:28:49,187 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/109c5619a32546b8aba8c6a0e0f59a86 is 50, key is test_row_0/B:col10/1731806926815/Put/seqid=0
2024-11-17T01:28:49,187 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/47e81a5a204b4528bb0cf37384d6a92d is 50, key is test_row_0/A:col10/1731806926815/Put/seqid=0
2024-11-17T01:28:49,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742279_1455 (size=12561)
2024-11-17T01:28:49,196 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/109c5619a32546b8aba8c6a0e0f59a86 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/109c5619a32546b8aba8c6a0e0f59a86
2024-11-17T01:28:49,199 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/B of 54b0ceeaba8680637a0c22225fbae49b into 109c5619a32546b8aba8c6a0e0f59a86(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-17T01:28:49,199 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b:
2024-11-17T01:28:49,199 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/B, priority=13, startTime=1731806929177; duration=0sec
2024-11-17T01:28:49,199 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-17T01:28:49,199 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:B
2024-11-17T01:28:49,199 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-17T01:28:49,200 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-17T01:28:49,200 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/C is initiating minor compaction (all files)
2024-11-17T01:28:49,200 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/C in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.
2024-11-17T01:28:49,200 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/33b461cd25f44a13abd9a555e933f598, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0ee05632b99b4017b0ab4911a8d9a7b3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/436f8cdb9a8e493e8f82656669e2777c] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=35.9 K
2024-11-17T01:28:49,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742280_1456 (size=12561)
2024-11-17T01:28:49,201 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 33b461cd25f44a13abd9a555e933f598, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=134, earliestPutTs=1731806925531
2024-11-17T01:28:49,201 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ee05632b99b4017b0ab4911a8d9a7b3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1731806926168
2024-11-17T01:28:49,201 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 436f8cdb9a8e493e8f82656669e2777c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731806926812
2024-11-17T01:28:49,205 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/47e81a5a204b4528bb0cf37384d6a92d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/47e81a5a204b4528bb0cf37384d6a92d
2024-11-17T01:28:49,207 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#C#compaction#386 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-17T01:28:49,208 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/e8fb390c7da5421c8f2412f45effde30 is 50, key is test_row_0/C:col10/1731806926815/Put/seqid=0
2024-11-17T01:28:49,209 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/A of 54b0ceeaba8680637a0c22225fbae49b into 47e81a5a204b4528bb0cf37384d6a92d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-17T01:28:49,209 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b:
2024-11-17T01:28:49,209 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/A, priority=13, startTime=1731806929176; duration=0sec
2024-11-17T01:28:49,210 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-17T01:28:49,210 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:A
2024-11-17T01:28:49,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742281_1457 (size=12561)
2024-11-17T01:28:49,300 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:28:49,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137
2024-11-17T01:28:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.
2024-11-17T01:28:49,301 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-17T01:28:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:49,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:49,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/3286cb190c1a4f6e9892e56adac43560 is 50, key is test_row_0/A:col10/1731806927964/Put/seqid=0 2024-11-17T01:28:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742282_1458 (size=12151) 2024-11-17T01:28:49,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-17T01:28:49,615 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/e8fb390c7da5421c8f2412f45effde30 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/e8fb390c7da5421c8f2412f45effde30 2024-11-17T01:28:49,618 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/C of 54b0ceeaba8680637a0c22225fbae49b into e8fb390c7da5421c8f2412f45effde30(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:49,618 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:49,618 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/C, priority=13, startTime=1731806929177; duration=0sec 2024-11-17T01:28:49,618 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:49,618 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:C 2024-11-17T01:28:49,712 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/3286cb190c1a4f6e9892e56adac43560 2024-11-17T01:28:49,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/731279275c824495922d71766cd4e11d is 50, key is test_row_0/B:col10/1731806927964/Put/seqid=0 2024-11-17T01:28:49,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742283_1459 (size=12151) 2024-11-17T01:28:50,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:50,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:50,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806990110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,120 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/731279275c824495922d71766cd4e11d 2024-11-17T01:28:50,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806990113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806990113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806990114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806990115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/581236ae6d9b4dbda1420ca656d234b0 is 50, key is test_row_0/C:col10/1731806927964/Put/seqid=0 2024-11-17T01:28:50,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742284_1460 (size=12151) 2024-11-17T01:28:50,220 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806990218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806990223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806990224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806990224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,228 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806990224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-17T01:28:50,424 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806990422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806990428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806990428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,430 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806990428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806990430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,530 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/581236ae6d9b4dbda1420ca656d234b0 2024-11-17T01:28:50,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/3286cb190c1a4f6e9892e56adac43560 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3286cb190c1a4f6e9892e56adac43560 2024-11-17T01:28:50,536 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3286cb190c1a4f6e9892e56adac43560, entries=150, sequenceid=195, filesize=11.9 K 2024-11-17T01:28:50,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/731279275c824495922d71766cd4e11d as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/731279275c824495922d71766cd4e11d 2024-11-17T01:28:50,539 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/731279275c824495922d71766cd4e11d, entries=150, sequenceid=195, filesize=11.9 K 2024-11-17T01:28:50,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/581236ae6d9b4dbda1420ca656d234b0 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/581236ae6d9b4dbda1420ca656d234b0 2024-11-17T01:28:50,543 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/581236ae6d9b4dbda1420ca656d234b0, entries=150, sequenceid=195, filesize=11.9 K 2024-11-17T01:28:50,544 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 54b0ceeaba8680637a0c22225fbae49b in 1243ms, sequenceid=195, compaction requested=false 2024-11-17T01:28:50,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:50,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:50,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-17T01:28:50,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-17T01:28:50,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-17T01:28:50,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3100 sec 2024-11-17T01:28:50,547 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 2.3120 sec 2024-11-17T01:28:50,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:50,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-17T01:28:50,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:50,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:50,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:50,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:50,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:50,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:50,766 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/70671b2e38f44a3392ca4d6f9031d264 is 50, key is test_row_0/A:col10/1731806930740/Put/seqid=0 2024-11-17T01:28:50,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742285_1461 (size=14541) 2024-11-17T01:28:50,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806990776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806990776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806990777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,789 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806990782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,790 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806990783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806990884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806990884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806990885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,895 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806990890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:50,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:50,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806990890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806991090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806991090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806991091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806991097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,102 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806991098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/70671b2e38f44a3392ca4d6f9031d264 2024-11-17T01:28:51,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3d720e0d045f407db7f3a1de38554244 is 50, key is test_row_0/B:col10/1731806930740/Put/seqid=0 2024-11-17T01:28:51,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742286_1462 (size=12151) 2024-11-17T01:28:51,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806991397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806991397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806991398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806991403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806991403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,588 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3d720e0d045f407db7f3a1de38554244 2024-11-17T01:28:51,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/54eaccbccb484c5ca56f3038078662a1 is 50, key is test_row_0/C:col10/1731806930740/Put/seqid=0 2024-11-17T01:28:51,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742287_1463 (size=12151) 2024-11-17T01:28:51,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806991903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806991904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806991904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,913 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806991909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:51,916 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:51,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806991912, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:52,034 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/54eaccbccb484c5ca56f3038078662a1 2024-11-17T01:28:52,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/70671b2e38f44a3392ca4d6f9031d264 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/70671b2e38f44a3392ca4d6f9031d264 2024-11-17T01:28:52,040 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/70671b2e38f44a3392ca4d6f9031d264, entries=200, sequenceid=217, filesize=14.2 K 2024-11-17T01:28:52,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3d720e0d045f407db7f3a1de38554244 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d720e0d045f407db7f3a1de38554244 2024-11-17T01:28:52,043 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d720e0d045f407db7f3a1de38554244, entries=150, sequenceid=217, filesize=11.9 K 2024-11-17T01:28:52,044 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/54eaccbccb484c5ca56f3038078662a1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/54eaccbccb484c5ca56f3038078662a1 2024-11-17T01:28:52,047 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/54eaccbccb484c5ca56f3038078662a1, entries=150, sequenceid=217, filesize=11.9 K 2024-11-17T01:28:52,047 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 54b0ceeaba8680637a0c22225fbae49b in 1286ms, sequenceid=217, compaction requested=true 2024-11-17T01:28:52,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:52,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:52,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:52,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:52,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:52,047 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:52,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:52,047 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:52,047 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:52,048 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39253 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:52,048 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:52,048 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/A is initiating minor compaction (all files) 2024-11-17T01:28:52,048 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/B is initiating minor compaction (all files) 2024-11-17T01:28:52,048 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/A in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:52,048 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/B in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:52,048 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/47e81a5a204b4528bb0cf37384d6a92d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3286cb190c1a4f6e9892e56adac43560, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/70671b2e38f44a3392ca4d6f9031d264] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=38.3 K 2024-11-17T01:28:52,048 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/109c5619a32546b8aba8c6a0e0f59a86, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/731279275c824495922d71766cd4e11d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d720e0d045f407db7f3a1de38554244] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=36.0 K 2024-11-17T01:28:52,048 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47e81a5a204b4528bb0cf37384d6a92d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731806926812 2024-11-17T01:28:52,048 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 109c5619a32546b8aba8c6a0e0f59a86, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731806926812 2024-11-17T01:28:52,049 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3286cb190c1a4f6e9892e56adac43560, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731806927957 2024-11-17T01:28:52,049 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 731279275c824495922d71766cd4e11d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731806927957 2024-11-17T01:28:52,049 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70671b2e38f44a3392ca4d6f9031d264, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731806930112 2024-11-17T01:28:52,049 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d720e0d045f407db7f3a1de38554244, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731806930112 2024-11-17T01:28:52,054 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#B#compaction#393 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:52,054 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/2d4d94dbca244ae4b69c8ece44ce5aab is 50, key is test_row_0/B:col10/1731806930740/Put/seqid=0 2024-11-17T01:28:52,055 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#A#compaction#394 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:52,055 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/fdbc3ce2f5994db3b15ef7cc53dc8bd3 is 50, key is test_row_0/A:col10/1731806930740/Put/seqid=0 2024-11-17T01:28:52,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742288_1464 (size=12663) 2024-11-17T01:28:52,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742289_1465 (size=12663) 2024-11-17T01:28:52,071 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/fdbc3ce2f5994db3b15ef7cc53dc8bd3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/fdbc3ce2f5994db3b15ef7cc53dc8bd3 2024-11-17T01:28:52,075 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/A of 54b0ceeaba8680637a0c22225fbae49b into fdbc3ce2f5994db3b15ef7cc53dc8bd3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:52,075 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:52,075 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/A, priority=13, startTime=1731806932047; duration=0sec 2024-11-17T01:28:52,075 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:52,075 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:A 2024-11-17T01:28:52,075 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:52,076 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:52,076 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/C is initiating minor compaction (all files) 2024-11-17T01:28:52,076 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/C in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:52,076 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/e8fb390c7da5421c8f2412f45effde30, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/581236ae6d9b4dbda1420ca656d234b0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/54eaccbccb484c5ca56f3038078662a1] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=36.0 K 2024-11-17T01:28:52,076 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8fb390c7da5421c8f2412f45effde30, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1731806926812 2024-11-17T01:28:52,077 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 581236ae6d9b4dbda1420ca656d234b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731806927957 2024-11-17T01:28:52,077 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 54eaccbccb484c5ca56f3038078662a1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731806930112 2024-11-17T01:28:52,083 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#C#compaction#395 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:52,083 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/ad6ce9e838f5412c85de9d419ed5bd0e is 50, key is test_row_0/C:col10/1731806930740/Put/seqid=0 2024-11-17T01:28:52,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742290_1466 (size=12663) 2024-11-17T01:28:52,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-17T01:28:52,339 INFO [Thread-1892 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-17T01:28:52,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:52,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-17T01:28:52,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-17T01:28:52,342 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:52,343 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:52,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:52,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-17T01:28:52,471 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/2d4d94dbca244ae4b69c8ece44ce5aab as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/2d4d94dbca244ae4b69c8ece44ce5aab 2024-11-17T01:28:52,474 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/B of 54b0ceeaba8680637a0c22225fbae49b into 2d4d94dbca244ae4b69c8ece44ce5aab(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:52,474 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:52,474 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/B, priority=13, startTime=1731806932047; duration=0sec 2024-11-17T01:28:52,474 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:52,474 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:B 2024-11-17T01:28:52,490 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/ad6ce9e838f5412c85de9d419ed5bd0e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ad6ce9e838f5412c85de9d419ed5bd0e 2024-11-17T01:28:52,493 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/C of 54b0ceeaba8680637a0c22225fbae49b into ad6ce9e838f5412c85de9d419ed5bd0e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:52,494 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:52,494 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/C, priority=13, startTime=1731806932047; duration=0sec 2024-11-17T01:28:52,494 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:52,494 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:C 2024-11-17T01:28:52,494 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:52,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-17T01:28:52,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:52,495 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-17T01:28:52,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:52,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:52,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:52,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:52,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:52,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:52,498 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/bf70a9be9433413c9adb5418a4855a64 is 50, key is test_row_0/A:col10/1731806930776/Put/seqid=0 2024-11-17T01:28:52,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742291_1467 (size=12151) 2024-11-17T01:28:52,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-17T01:28:52,902 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/bf70a9be9433413c9adb5418a4855a64 2024-11-17T01:28:52,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/bfdfec0c90f54c3e95d13187ed284540 is 50, key is test_row_0/B:col10/1731806930776/Put/seqid=0 2024-11-17T01:28:52,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742292_1468 (size=12151) 2024-11-17T01:28:52,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:52,915 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:52,915 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/bfdfec0c90f54c3e95d13187ed284540 2024-11-17T01:28:52,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/99a4b54d2fba4faa9f0c0ea0240c22c3 is 50, key is test_row_0/C:col10/1731806930776/Put/seqid=0 2024-11-17T01:28:52,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742293_1469 (size=12151) 2024-11-17T01:28:52,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:52,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806992932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:52,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806992933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:52,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806992933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:52,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:52,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806992934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:52,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-17T01:28:52,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:52,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806992938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806993039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,042 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806993039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806993039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,043 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806993040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806993045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806993244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806993244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806993244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806993244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806993249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,331 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/99a4b54d2fba4faa9f0c0ea0240c22c3 2024-11-17T01:28:53,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/bf70a9be9433413c9adb5418a4855a64 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/bf70a9be9433413c9adb5418a4855a64 2024-11-17T01:28:53,342 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/bf70a9be9433413c9adb5418a4855a64, entries=150, sequenceid=235, filesize=11.9 K 2024-11-17T01:28:53,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/bfdfec0c90f54c3e95d13187ed284540 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bfdfec0c90f54c3e95d13187ed284540 2024-11-17T01:28:53,345 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bfdfec0c90f54c3e95d13187ed284540, entries=150, sequenceid=235, filesize=11.9 K 2024-11-17T01:28:53,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/99a4b54d2fba4faa9f0c0ea0240c22c3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/99a4b54d2fba4faa9f0c0ea0240c22c3 2024-11-17T01:28:53,348 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/99a4b54d2fba4faa9f0c0ea0240c22c3, entries=150, sequenceid=235, filesize=11.9 K 2024-11-17T01:28:53,348 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 54b0ceeaba8680637a0c22225fbae49b in 853ms, sequenceid=235, compaction requested=false 2024-11-17T01:28:53,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:53,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:53,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-17T01:28:53,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-17T01:28:53,350 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-17T01:28:53,351 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0060 sec 2024-11-17T01:28:53,351 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.0110 sec 2024-11-17T01:28:53,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-17T01:28:53,443 INFO [Thread-1892 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-17T01:28:53,444 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-17T01:28:53,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-17T01:28:53,445 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:53,446 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:53,446 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:53,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-17T01:28:53,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:53,550 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-17T01:28:53,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:53,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:53,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:53,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:53,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:53,551 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:53,555 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/441bf6b066634cecad80cdbb95ecaf51 is 50, key is test_row_0/A:col10/1731806932934/Put/seqid=0 2024-11-17T01:28:53,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742294_1470 (size=17031) 2024-11-17T01:28:53,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806993560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,570 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806993563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806993564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806993565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,572 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806993566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,597 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-17T01:28:53,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:53,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:53,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:53,598 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:53,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:53,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:53,671 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806993667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806993671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,675 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806993671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,676 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806993671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,677 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806993672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-17T01:28:53,750 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,750 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-17T01:28:53,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:53,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:53,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:53,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:53,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:53,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:53,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806993872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806993876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806993877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806993877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:53,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806993878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:53,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-17T01:28:53,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:53,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:53,902 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:53,902 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:53,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:53,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:53,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/441bf6b066634cecad80cdbb95ecaf51 2024-11-17T01:28:53,964 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/322db0af61654b659231b63d4934f36b is 50, key is test_row_0/B:col10/1731806932934/Put/seqid=0 2024-11-17T01:28:53,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742295_1471 (size=12201) 2024-11-17T01:28:54,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-17T01:28:54,054 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,054 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-17T01:28:54,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
as already flushing 2024-11-17T01:28:54,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,055 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806994180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806994181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806994184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806994184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806994185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,206 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-17T01:28:54,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:54,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,358 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,359 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-17T01:28:54,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:54,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,359 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,367 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/322db0af61654b659231b63d4934f36b 2024-11-17T01:28:54,372 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/70712603be2045b0a111a427d46289f7 is 50, key is test_row_0/C:col10/1731806932934/Put/seqid=0 2024-11-17T01:28:54,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742296_1472 (size=12201) 2024-11-17T01:28:54,511 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-17T01:28:54,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
as already flushing 2024-11-17T01:28:54,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-17T01:28:54,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-17T01:28:54,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. as already flushing 2024-11-17T01:28:54,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:28:54,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806994686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806994686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806994690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806994694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:54,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806994694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=258 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/70712603be2045b0a111a427d46289f7 2024-11-17T01:28:54,778 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/441bf6b066634cecad80cdbb95ecaf51 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/441bf6b066634cecad80cdbb95ecaf51 2024-11-17T01:28:54,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/441bf6b066634cecad80cdbb95ecaf51, entries=250, sequenceid=258, filesize=16.6 K 2024-11-17T01:28:54,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/322db0af61654b659231b63d4934f36b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/322db0af61654b659231b63d4934f36b 2024-11-17T01:28:54,784 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/322db0af61654b659231b63d4934f36b, entries=150, sequenceid=258, filesize=11.9 K 2024-11-17T01:28:54,785 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/70712603be2045b0a111a427d46289f7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/70712603be2045b0a111a427d46289f7 2024-11-17T01:28:54,787 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/70712603be2045b0a111a427d46289f7, entries=150, sequenceid=258, filesize=11.9 K 2024-11-17T01:28:54,788 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 54b0ceeaba8680637a0c22225fbae49b in 1238ms, sequenceid=258, compaction requested=true 2024-11-17T01:28:54,788 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:54,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:54,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:54,788 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:54,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:54,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:54,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:54,788 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:54,788 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:54,789 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41845 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:54,789 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:54,789 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/B is initiating minor compaction (all files) 2024-11-17T01:28:54,789 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/A is initiating minor compaction (all files) 2024-11-17T01:28:54,789 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/A in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,789 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/B in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:54,789 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/fdbc3ce2f5994db3b15ef7cc53dc8bd3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/bf70a9be9433413c9adb5418a4855a64, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/441bf6b066634cecad80cdbb95ecaf51] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=40.9 K 2024-11-17T01:28:54,789 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/2d4d94dbca244ae4b69c8ece44ce5aab, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bfdfec0c90f54c3e95d13187ed284540, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/322db0af61654b659231b63d4934f36b] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=36.1 K 2024-11-17T01:28:54,789 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting fdbc3ce2f5994db3b15ef7cc53dc8bd3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731806930112 2024-11-17T01:28:54,790 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d4d94dbca244ae4b69c8ece44ce5aab, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731806930112 2024-11-17T01:28:54,790 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting bf70a9be9433413c9adb5418a4855a64, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731806930776 2024-11-17T01:28:54,790 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): 
Compacting bfdfec0c90f54c3e95d13187ed284540, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731806930776 2024-11-17T01:28:54,790 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 441bf6b066634cecad80cdbb95ecaf51, keycount=250, bloomtype=ROW, size=16.6 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1731806932932 2024-11-17T01:28:54,790 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 322db0af61654b659231b63d4934f36b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1731806932934 2024-11-17T01:28:54,796 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#B#compaction#402 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:54,796 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3d2d31da0b31429dada36f4c1e8dfcba is 50, key is test_row_0/B:col10/1731806932934/Put/seqid=0 2024-11-17T01:28:54,799 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#A#compaction#403 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:54,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742297_1473 (size=12815) 2024-11-17T01:28:54,800 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/242b7cbe1f644efd821ed9eb9f1b00c2 is 50, key is test_row_0/A:col10/1731806932934/Put/seqid=0 2024-11-17T01:28:54,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742298_1474 (size=12815) 2024-11-17T01:28:54,816 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:54,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-17T01:28:54,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
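The ExploringCompactionPolicy entries above ("Exploring compaction algorithm has selected 3 files of size 41845 ... with 1 in ratio") refer to HBase's ratio-based store file selection. As a rough illustration only, not the actual ExploringCompactionPolicy code: a candidate set of store files counts as "in ratio" when no single file is larger than a configured ratio (hbase.hstore.compaction.ratio, 1.2 by default) times the combined size of the other files in the set. The class and method names below are invented for the sketch, and the byte sizes only approximate the 12.4 K / 11.9 K / 16.6 K files from the log.

```java
// Illustrative sketch of the "in ratio" check, not HBase's implementation.
import java.util.List;

public class RatioCheckSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates the candidate set; skip it
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate sizes of the three A-store files selected in the log.
    System.out.println(filesInRatio(List.of(12_698L, 12_186L, 16_998L), 1.2)); // true
  }
}
```

The point of the check is to avoid repeatedly rewriting one large file together with much smaller ones; here all three files are of similar size, so the single candidate permutation passes and a minor compaction of all files is started.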
2024-11-17T01:28:54,816 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-17T01:28:54,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:54,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:54,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:54,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:54,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:54,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:54,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/2d373f2925124c4bb01ad9e82c9e949f is 50, key is test_row_0/A:col10/1731806933565/Put/seqid=0 2024-11-17T01:28:54,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742299_1475 (size=12301) 2024-11-17T01:28:54,828 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/2d373f2925124c4bb01ad9e82c9e949f 2024-11-17T01:28:54,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/c3e211f838394c858ec8ae3cc7dd64f6 is 50, key is test_row_0/B:col10/1731806933565/Put/seqid=0 2024-11-17T01:28:54,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742300_1476 (size=12301) 2024-11-17T01:28:55,203 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/3d2d31da0b31429dada36f4c1e8dfcba as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d2d31da0b31429dada36f4c1e8dfcba 2024-11-17T01:28:55,207 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/242b7cbe1f644efd821ed9eb9f1b00c2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/242b7cbe1f644efd821ed9eb9f1b00c2 2024-11-17T01:28:55,207 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/B of 54b0ceeaba8680637a0c22225fbae49b into 3d2d31da0b31429dada36f4c1e8dfcba(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:55,207 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:55,207 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/B, priority=13, startTime=1731806934788; duration=0sec 2024-11-17T01:28:55,207 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:55,207 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:B 2024-11-17T01:28:55,207 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:55,208 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37015 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:55,208 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/C is initiating minor compaction (all files) 2024-11-17T01:28:55,208 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/C in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
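The RegionTooBusyException warnings earlier in this run (and again below) all originate from HRegion.checkResources, which rejects writes while the region's memstore is above its blocking limit, roughly the region flush size multiplied by hbase.hregion.memstore.block.multiplier; this test apparently runs with a very small flush size, hence "Over memstore limit=512.0 K". A minimal sketch of that idea follows; the class and method names are invented and a placeholder exception stands in for RegionTooBusyException.

```java
// Illustrative sketch only, not the real HRegion.checkResources().
public class MemstoreGuardSketch {
  private final long blockingMemStoreSize; // ~ flush size * hbase.hregion.memstore.block.multiplier
  private long memStoreDataSize;           // bytes currently buffered in the region's memstores

  MemstoreGuardSketch(long flushSizeBytes, int blockMultiplier) {
    this.blockingMemStoreSize = flushSizeBytes * blockMultiplier;
  }

  /** Account for an incoming mutation, rejecting it while a flush has to catch up. */
  void applyMutation(long mutationBytes) {
    if (memStoreDataSize > blockingMemStoreSize) {
      requestFlush(); // hand the region to the MemStoreFlusher thread
      // Stand-in for org.apache.hadoop.hbase.RegionTooBusyException:
      throw new IllegalStateException("Over memstore limit=" + blockingMemStoreSize + " B");
    }
    memStoreDataSize += mutationBytes;
  }

  void requestFlush() { /* no-op in this sketch */ }

  /** A completed flush drains the buffered data and lets writes through again. */
  void flushCompleted(long flushedBytes) {
    memStoreDataSize -= flushedBytes;
  }
}
```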
2024-11-17T01:28:55,208 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ad6ce9e838f5412c85de9d419ed5bd0e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/99a4b54d2fba4faa9f0c0ea0240c22c3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/70712603be2045b0a111a427d46289f7] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=36.1 K 2024-11-17T01:28:55,208 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ad6ce9e838f5412c85de9d419ed5bd0e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731806930112 2024-11-17T01:28:55,209 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 99a4b54d2fba4faa9f0c0ea0240c22c3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731806930776 2024-11-17T01:28:55,209 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 70712603be2045b0a111a427d46289f7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1731806932934 2024-11-17T01:28:55,210 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/A of 54b0ceeaba8680637a0c22225fbae49b into 242b7cbe1f644efd821ed9eb9f1b00c2(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:55,210 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:55,210 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/A, priority=13, startTime=1731806934788; duration=0sec 2024-11-17T01:28:55,211 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:55,211 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:A 2024-11-17T01:28:55,214 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#C#compaction#406 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:55,214 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/18399df23cf640c092431dd7b65aefbf is 50, key is test_row_0/C:col10/1731806932934/Put/seqid=0 2024-11-17T01:28:55,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742301_1477 (size=12815) 2024-11-17T01:28:55,238 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/c3e211f838394c858ec8ae3cc7dd64f6 2024-11-17T01:28:55,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/c7c4b41b09e84fcab6b0618421c6601a is 50, key is test_row_0/C:col10/1731806933565/Put/seqid=0 2024-11-17T01:28:55,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742302_1478 (size=12301) 2024-11-17T01:28:55,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-17T01:28:55,621 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/18399df23cf640c092431dd7b65aefbf as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/18399df23cf640c092431dd7b65aefbf 2024-11-17T01:28:55,625 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/C of 54b0ceeaba8680637a0c22225fbae49b into 18399df23cf640c092431dd7b65aefbf(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
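Between flushes the writers keep hitting the same RegionTooBusyException, as the entries below show. Purely for context, a client-side sketch of a put against this table with an explicit backoff loop: the HBase client already retries such failures internally, so this is illustrative only; the class name, retry count, sleep times and cell value are arbitrary, while the table, row, family and qualifier mirror the log.

```java
// Hypothetical application-level backoff around Table.put; not part of the test.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                                   // write accepted
        } catch (IOException busyOrOtherFailure) { // e.g. a wrapped RegionTooBusyException
          if (attempt >= 5) throw busyOrOtherFailure;
          Thread.sleep(backoffMs);
          backoffMs *= 2;                          // simple exponential backoff
        }
      }
    }
  }
}
```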
2024-11-17T01:28:55,625 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:55,625 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/C, priority=13, startTime=1731806934788; duration=0sec 2024-11-17T01:28:55,625 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:55,625 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:C 2024-11-17T01:28:55,648 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/c7c4b41b09e84fcab6b0618421c6601a 2024-11-17T01:28:55,651 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/2d373f2925124c4bb01ad9e82c9e949f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2d373f2925124c4bb01ad9e82c9e949f 2024-11-17T01:28:55,653 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2d373f2925124c4bb01ad9e82c9e949f, entries=150, sequenceid=271, filesize=12.0 K 2024-11-17T01:28:55,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/c3e211f838394c858ec8ae3cc7dd64f6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/c3e211f838394c858ec8ae3cc7dd64f6 2024-11-17T01:28:55,657 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/c3e211f838394c858ec8ae3cc7dd64f6, entries=150, sequenceid=271, filesize=12.0 K 2024-11-17T01:28:55,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/c7c4b41b09e84fcab6b0618421c6601a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c7c4b41b09e84fcab6b0618421c6601a 2024-11-17T01:28:55,660 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c7c4b41b09e84fcab6b0618421c6601a, entries=150, sequenceid=271, filesize=12.0 K 2024-11-17T01:28:55,661 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 54b0ceeaba8680637a0c22225fbae49b in 844ms, sequenceid=271, compaction requested=false 2024-11-17T01:28:55,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:55,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:55,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-17T01:28:55,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-17T01:28:55,662 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-17T01:28:55,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2150 sec 2024-11-17T01:28:55,663 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 2.2190 sec 2024-11-17T01:28:55,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:55,705 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-17T01:28:55,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:55,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:55,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:55,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:55,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:55,705 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:55,708 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/f8d51a8bb0e245f3b9a13ea2370debb9 is 50, key is test_row_0/A:col10/1731806935703/Put/seqid=0 2024-11-17T01:28:55,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742303_1479 (size=14741) 2024-11-17T01:28:55,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806995733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:55,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806995737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:55,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806995739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:55,749 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806995739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:55,751 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806995740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:55,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806995841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:55,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806995845, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:55,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806995850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:55,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806995850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:55,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:55,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806995852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806996044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806996055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806996055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806996055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806996055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,112 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/f8d51a8bb0e245f3b9a13ea2370debb9 2024-11-17T01:28:56,117 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/ee932b07a8c84dd2b6b54c0dc4642f33 is 50, key is test_row_0/B:col10/1731806935703/Put/seqid=0 2024-11-17T01:28:56,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742304_1480 (size=12301) 2024-11-17T01:28:56,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806996348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806996361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806996362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806996362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806996362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,520 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/ee932b07a8c84dd2b6b54c0dc4642f33 2024-11-17T01:28:56,526 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/c3433fbc81c44d2eaea5bb0ce5142cf9 is 50, key is test_row_0/C:col10/1731806935703/Put/seqid=0 2024-11-17T01:28:56,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742305_1481 (size=12301) 2024-11-17T01:28:56,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59730 deadline: 1731806996852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59710 deadline: 1731806996868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,873 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59668 deadline: 1731806996870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59684 deadline: 1731806996871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,876 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:28:56,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:59682 deadline: 1731806996872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:56,930 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/c3433fbc81c44d2eaea5bb0ce5142cf9 2024-11-17T01:28:56,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/f8d51a8bb0e245f3b9a13ea2370debb9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/f8d51a8bb0e245f3b9a13ea2370debb9 2024-11-17T01:28:56,936 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/f8d51a8bb0e245f3b9a13ea2370debb9, entries=200, sequenceid=285, filesize=14.4 K 2024-11-17T01:28:56,936 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/ee932b07a8c84dd2b6b54c0dc4642f33 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/ee932b07a8c84dd2b6b54c0dc4642f33 2024-11-17T01:28:56,939 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/ee932b07a8c84dd2b6b54c0dc4642f33, entries=150, sequenceid=285, filesize=12.0 K 2024-11-17T01:28:56,940 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/c3433fbc81c44d2eaea5bb0ce5142cf9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c3433fbc81c44d2eaea5bb0ce5142cf9 2024-11-17T01:28:56,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c3433fbc81c44d2eaea5bb0ce5142cf9, entries=150, sequenceid=285, filesize=12.0 K 2024-11-17T01:28:56,942 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 54b0ceeaba8680637a0c22225fbae49b in 1237ms, sequenceid=285, compaction requested=true 2024-11-17T01:28:56,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:28:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:56,943 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:28:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:56,943 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 54b0ceeaba8680637a0c22225fbae49b:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:28:56,943 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:56,943 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:56,943 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37417 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:56,943 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/B is initiating minor compaction (all files) 2024-11-17T01:28:56,943 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/A is initiating minor compaction (all files) 2024-11-17T01:28:56,943 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/B in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:56,943 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/A in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:56,943 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/242b7cbe1f644efd821ed9eb9f1b00c2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2d373f2925124c4bb01ad9e82c9e949f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/f8d51a8bb0e245f3b9a13ea2370debb9] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=38.9 K 2024-11-17T01:28:56,943 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d2d31da0b31429dada36f4c1e8dfcba, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/c3e211f838394c858ec8ae3cc7dd64f6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/ee932b07a8c84dd2b6b54c0dc4642f33] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=36.5 K 2024-11-17T01:28:56,944 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 242b7cbe1f644efd821ed9eb9f1b00c2, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1731806932934 2024-11-17T01:28:56,944 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d2d31da0b31429dada36f4c1e8dfcba, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1731806932934 2024-11-17T01:28:56,944 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d373f2925124c4bb01ad9e82c9e949f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1731806933558 2024-11-17T01:28:56,944 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): 
Compacting c3e211f838394c858ec8ae3cc7dd64f6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1731806933558 2024-11-17T01:28:56,944 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8d51a8bb0e245f3b9a13ea2370debb9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731806935699 2024-11-17T01:28:56,944 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ee932b07a8c84dd2b6b54c0dc4642f33, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731806935699 2024-11-17T01:28:56,953 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#A#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:56,953 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/4e81fe8e1bdc45daa101f4be96e47536 is 50, key is test_row_0/A:col10/1731806935703/Put/seqid=0 2024-11-17T01:28:56,953 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#B#compaction#412 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:56,954 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/87b572ab9af04a26b7d919579c0a463e is 50, key is test_row_0/B:col10/1731806935703/Put/seqid=0 2024-11-17T01:28:56,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742307_1483 (size=13017) 2024-11-17T01:28:56,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742306_1482 (size=13017) 2024-11-17T01:28:57,364 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/4e81fe8e1bdc45daa101f4be96e47536 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/4e81fe8e1bdc45daa101f4be96e47536 2024-11-17T01:28:57,364 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/87b572ab9af04a26b7d919579c0a463e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/87b572ab9af04a26b7d919579c0a463e 2024-11-17T01:28:57,368 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/B of 54b0ceeaba8680637a0c22225fbae49b into 87b572ab9af04a26b7d919579c0a463e(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:57,368 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/A of 54b0ceeaba8680637a0c22225fbae49b into 4e81fe8e1bdc45daa101f4be96e47536(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:28:57,368 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:57,368 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:57,368 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/B, priority=13, startTime=1731806936943; duration=0sec 2024-11-17T01:28:57,368 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/A, priority=13, startTime=1731806936943; duration=0sec 2024-11-17T01:28:57,368 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:57,368 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:B 2024-11-17T01:28:57,368 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:28:57,369 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:A 2024-11-17T01:28:57,369 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:28:57,369 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37417 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:28:57,369 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 54b0ceeaba8680637a0c22225fbae49b/C is initiating minor compaction (all files) 2024-11-17T01:28:57,369 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 54b0ceeaba8680637a0c22225fbae49b/C in TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:57,369 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/18399df23cf640c092431dd7b65aefbf, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c7c4b41b09e84fcab6b0618421c6601a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c3433fbc81c44d2eaea5bb0ce5142cf9] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp, totalSize=36.5 K 2024-11-17T01:28:57,370 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 18399df23cf640c092431dd7b65aefbf, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=258, earliestPutTs=1731806932934 2024-11-17T01:28:57,370 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c7c4b41b09e84fcab6b0618421c6601a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1731806933558 2024-11-17T01:28:57,370 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c3433fbc81c44d2eaea5bb0ce5142cf9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1731806935699 2024-11-17T01:28:57,376 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 54b0ceeaba8680637a0c22225fbae49b#C#compaction#413 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:28:57,376 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/894f5da332d841339a3e1dce7e427b10 is 50, key is test_row_0/C:col10/1731806935703/Put/seqid=0 2024-11-17T01:28:57,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742308_1484 (size=13017) 2024-11-17T01:28:57,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-17T01:28:57,549 INFO [Thread-1892 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-17T01:28:57,550 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:28:57,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-17T01:28:57,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-17T01:28:57,551 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:28:57,552 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:28:57,552 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:28:57,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-17T01:28:57,703 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:57,703 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-17T01:28:57,703 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:57,704 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-17T01:28:57,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:57,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:57,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:57,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:57,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:57,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:57,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/3a63de59405344b7a14ae22b0649f4c3 is 50, key is test_row_0/A:col10/1731806935737/Put/seqid=0 2024-11-17T01:28:57,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742309_1485 (size=12301) 2024-11-17T01:28:57,783 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/894f5da332d841339a3e1dce7e427b10 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/894f5da332d841339a3e1dce7e427b10 2024-11-17T01:28:57,786 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 54b0ceeaba8680637a0c22225fbae49b/C of 54b0ceeaba8680637a0c22225fbae49b into 894f5da332d841339a3e1dce7e427b10(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:28:57,786 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:57,786 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b., storeName=54b0ceeaba8680637a0c22225fbae49b/C, priority=13, startTime=1731806936943; duration=0sec 2024-11-17T01:28:57,786 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:28:57,786 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 54b0ceeaba8680637a0c22225fbae49b:C 2024-11-17T01:28:57,811 DEBUG [Thread-1895 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:63898 2024-11-17T01:28:57,811 DEBUG [Thread-1895 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:57,812 DEBUG [Thread-1901 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:63898 2024-11-17T01:28:57,812 DEBUG [Thread-1901 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:57,813 DEBUG [Thread-1899 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:63898 2024-11-17T01:28:57,813 DEBUG [Thread-1899 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:57,814 DEBUG [Thread-1893 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ed69825 to 127.0.0.1:63898 2024-11-17T01:28:57,814 DEBUG [Thread-1893 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:57,815 DEBUG [Thread-1897 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:63898 2024-11-17T01:28:57,815 DEBUG [Thread-1897 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:57,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-17T01:28:57,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
as already flushing 2024-11-17T01:28:57,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:57,866 DEBUG [Thread-1882 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x022a6e9f to 127.0.0.1:63898 2024-11-17T01:28:57,866 DEBUG [Thread-1882 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:57,878 DEBUG [Thread-1888 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:63898 2024-11-17T01:28:57,878 DEBUG [Thread-1888 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:57,880 DEBUG [Thread-1890 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:63898 2024-11-17T01:28:57,880 DEBUG [Thread-1890 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:57,884 DEBUG [Thread-1886 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:63898 2024-11-17T01:28:57,884 DEBUG [Thread-1886 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:57,885 DEBUG [Thread-1884 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:63898 2024-11-17T01:28:57,885 DEBUG [Thread-1884 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:58,113 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/3a63de59405344b7a14ae22b0649f4c3 2024-11-17T01:28:58,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/f2950890010a46d0a0ab17ce9aa165e4 is 50, key is test_row_0/B:col10/1731806935737/Put/seqid=0 2024-11-17T01:28:58,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742310_1486 (size=12301) 2024-11-17T01:28:58,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-17T01:28:58,532 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/f2950890010a46d0a0ab17ce9aa165e4 2024-11-17T01:28:58,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/39f5232de2904a9681dbc66c13744d99 is 50, key is test_row_0/C:col10/1731806935737/Put/seqid=0 2024-11-17T01:28:58,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742311_1487 
(size=12301) 2024-11-17T01:28:58,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-17T01:28:58,951 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/39f5232de2904a9681dbc66c13744d99 2024-11-17T01:28:58,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/3a63de59405344b7a14ae22b0649f4c3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3a63de59405344b7a14ae22b0649f4c3 2024-11-17T01:28:58,966 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3a63de59405344b7a14ae22b0649f4c3, entries=150, sequenceid=312, filesize=12.0 K 2024-11-17T01:28:58,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/f2950890010a46d0a0ab17ce9aa165e4 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/f2950890010a46d0a0ab17ce9aa165e4 2024-11-17T01:28:58,969 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/f2950890010a46d0a0ab17ce9aa165e4, entries=150, sequenceid=312, filesize=12.0 K 2024-11-17T01:28:58,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/39f5232de2904a9681dbc66c13744d99 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/39f5232de2904a9681dbc66c13744d99 2024-11-17T01:28:58,972 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/39f5232de2904a9681dbc66c13744d99, entries=150, sequenceid=312, filesize=12.0 K 2024-11-17T01:28:58,973 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] 
regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=33.54 KB/34350 for 54b0ceeaba8680637a0c22225fbae49b in 1270ms, sequenceid=312, compaction requested=false 2024-11-17T01:28:58,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:28:58,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:58,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-17T01:28:58,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-17T01:28:58,975 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-17T01:28:58,975 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4220 sec 2024-11-17T01:28:58,976 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.4250 sec 2024-11-17T01:28:59,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-17T01:28:59,658 INFO [Thread-1892 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-17T01:28:59,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-17T01:28:59,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 40 2024-11-17T01:28:59,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-17T01:28:59,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-17T01:28:59,658 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2541 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7623 rows 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2534 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7602 rows 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2546 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7638 rows 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2544 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7632 rows 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2549 2024-11-17T01:28:59,659 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7647 rows 2024-11-17T01:28:59,659 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-17T01:28:59,659 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x59bd764a to 127.0.0.1:63898 2024-11-17T01:28:59,659 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:28:59,665 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-17T01:28:59,666 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-17T01:28:59,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-17T01:28:59,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-17T01:28:59,669 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806939669"}]},"ts":"1731806939669"} 2024-11-17T01:28:59,670 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-17T01:28:59,707 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-17T01:28:59,708 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-17T01:28:59,710 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=54b0ceeaba8680637a0c22225fbae49b, UNASSIGN}] 2024-11-17T01:28:59,712 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=145, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=54b0ceeaba8680637a0c22225fbae49b, UNASSIGN 2024-11-17T01:28:59,713 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=54b0ceeaba8680637a0c22225fbae49b, regionState=CLOSING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:59,714 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-17T01:28:59,714 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; CloseRegionProcedure 54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:28:59,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-17T01:28:59,867 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:28:59,868 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(124): Close 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:28:59,868 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-17T01:28:59,868 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1681): Closing 54b0ceeaba8680637a0c22225fbae49b, disabling compactions & flushes 2024-11-17T01:28:59,868 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:59,868 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:28:59,869 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. after waiting 0 ms 2024-11-17T01:28:59,869 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 
2024-11-17T01:28:59,869 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(2837): Flushing 54b0ceeaba8680637a0c22225fbae49b 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-17T01:28:59,869 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=A 2024-11-17T01:28:59,869 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:59,870 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=B 2024-11-17T01:28:59,870 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:59,870 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 54b0ceeaba8680637a0c22225fbae49b, store=C 2024-11-17T01:28:59,870 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:28:59,877 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/b6fa50fdfbdc4d36b4716055d6f47316 is 50, key is test_row_0/A:col10/1731806937884/Put/seqid=0 2024-11-17T01:28:59,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742312_1488 (size=9857) 2024-11-17T01:28:59,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-17T01:28:59,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-17T01:29:00,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-17T01:29:00,284 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/b6fa50fdfbdc4d36b4716055d6f47316 2024-11-17T01:29:00,292 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/444fb483288f453dbeb9a315402a2eae is 50, key is test_row_0/B:col10/1731806937884/Put/seqid=0 2024-11-17T01:29:00,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742313_1489 (size=9857) 2024-11-17T01:29:00,697 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/444fb483288f453dbeb9a315402a2eae 2024-11-17T01:29:00,711 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/e345eb2bb08f4f3b84f34d2e909756da is 50, key is test_row_0/C:col10/1731806937884/Put/seqid=0 2024-11-17T01:29:00,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742314_1490 (size=9857) 2024-11-17T01:29:00,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-17T01:29:01,115 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/e345eb2bb08f4f3b84f34d2e909756da 2024-11-17T01:29:01,126 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/A/b6fa50fdfbdc4d36b4716055d6f47316 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/b6fa50fdfbdc4d36b4716055d6f47316 2024-11-17T01:29:01,130 INFO 
[RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/b6fa50fdfbdc4d36b4716055d6f47316, entries=100, sequenceid=321, filesize=9.6 K 2024-11-17T01:29:01,131 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/B/444fb483288f453dbeb9a315402a2eae as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/444fb483288f453dbeb9a315402a2eae 2024-11-17T01:29:01,134 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/444fb483288f453dbeb9a315402a2eae, entries=100, sequenceid=321, filesize=9.6 K 2024-11-17T01:29:01,135 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/.tmp/C/e345eb2bb08f4f3b84f34d2e909756da as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/e345eb2bb08f4f3b84f34d2e909756da 2024-11-17T01:29:01,139 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/e345eb2bb08f4f3b84f34d2e909756da, entries=100, sequenceid=321, filesize=9.6 K 2024-11-17T01:29:01,139 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 54b0ceeaba8680637a0c22225fbae49b in 1270ms, sequenceid=321, compaction requested=true 2024-11-17T01:29:01,140 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2a6a41d537694b848675ebfeab7e96a8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/66302624b5f14381b1803022b07e970d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/90de21b5096e43c582e29d4be60b5a9d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/7185447f86334a9b8551c0ac99c35490, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/39d462e020ce48c7bc24a60973ff51db, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/324d6f67f71244fc87f29cea7e4cc04a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/8791b110f3ff4a03afebc409d78c1112, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/cf0694637fe44b9fa555618690213208, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/1261922bb84e418e9c76558e13c6308f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/15eea2aeed694cd781d7e9b2ad810bfe, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/9595770f6c314ae4831ed9d740427866, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/056512560f1349d2b6abc0a0320d410e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/47e81a5a204b4528bb0cf37384d6a92d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3286cb190c1a4f6e9892e56adac43560, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/70671b2e38f44a3392ca4d6f9031d264, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/fdbc3ce2f5994db3b15ef7cc53dc8bd3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/bf70a9be9433413c9adb5418a4855a64, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/441bf6b066634cecad80cdbb95ecaf51, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/242b7cbe1f644efd821ed9eb9f1b00c2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2d373f2925124c4bb01ad9e82c9e949f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/f8d51a8bb0e245f3b9a13ea2370debb9] to archive 2024-11-17T01:29:01,141 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:29:01,142 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2a6a41d537694b848675ebfeab7e96a8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2a6a41d537694b848675ebfeab7e96a8 2024-11-17T01:29:01,143 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/66302624b5f14381b1803022b07e970d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/66302624b5f14381b1803022b07e970d 2024-11-17T01:29:01,144 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/90de21b5096e43c582e29d4be60b5a9d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/90de21b5096e43c582e29d4be60b5a9d 2024-11-17T01:29:01,144 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/7185447f86334a9b8551c0ac99c35490 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/7185447f86334a9b8551c0ac99c35490 2024-11-17T01:29:01,145 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/39d462e020ce48c7bc24a60973ff51db to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/39d462e020ce48c7bc24a60973ff51db 2024-11-17T01:29:01,146 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/324d6f67f71244fc87f29cea7e4cc04a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/324d6f67f71244fc87f29cea7e4cc04a 2024-11-17T01:29:01,147 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/8791b110f3ff4a03afebc409d78c1112 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/8791b110f3ff4a03afebc409d78c1112 2024-11-17T01:29:01,148 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/cf0694637fe44b9fa555618690213208 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/cf0694637fe44b9fa555618690213208 2024-11-17T01:29:01,149 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/1261922bb84e418e9c76558e13c6308f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/1261922bb84e418e9c76558e13c6308f 2024-11-17T01:29:01,150 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/15eea2aeed694cd781d7e9b2ad810bfe to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/15eea2aeed694cd781d7e9b2ad810bfe 2024-11-17T01:29:01,150 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/9595770f6c314ae4831ed9d740427866 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/9595770f6c314ae4831ed9d740427866 2024-11-17T01:29:01,151 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/056512560f1349d2b6abc0a0320d410e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/056512560f1349d2b6abc0a0320d410e 2024-11-17T01:29:01,152 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/47e81a5a204b4528bb0cf37384d6a92d to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/47e81a5a204b4528bb0cf37384d6a92d 2024-11-17T01:29:01,152 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3286cb190c1a4f6e9892e56adac43560 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3286cb190c1a4f6e9892e56adac43560 2024-11-17T01:29:01,153 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/70671b2e38f44a3392ca4d6f9031d264 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/70671b2e38f44a3392ca4d6f9031d264 2024-11-17T01:29:01,153 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/fdbc3ce2f5994db3b15ef7cc53dc8bd3 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/fdbc3ce2f5994db3b15ef7cc53dc8bd3 2024-11-17T01:29:01,154 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/bf70a9be9433413c9adb5418a4855a64 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/bf70a9be9433413c9adb5418a4855a64 2024-11-17T01:29:01,155 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/441bf6b066634cecad80cdbb95ecaf51 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/441bf6b066634cecad80cdbb95ecaf51 2024-11-17T01:29:01,155 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/242b7cbe1f644efd821ed9eb9f1b00c2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/242b7cbe1f644efd821ed9eb9f1b00c2 2024-11-17T01:29:01,156 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2d373f2925124c4bb01ad9e82c9e949f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/2d373f2925124c4bb01ad9e82c9e949f 2024-11-17T01:29:01,157 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/f8d51a8bb0e245f3b9a13ea2370debb9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/f8d51a8bb0e245f3b9a13ea2370debb9 2024-11-17T01:29:01,157 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/acb1eaea2a9d467f91512c8255c8f686, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bc2993b5a88047f5a4a88c048178df29, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d54752772b04563a771f700c1b2f41b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/465f8e9e86e145e7858f810aef551e24, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/d9391cb441c1457cb18349e49c2d2622, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/55c5e8894c584c6f979caf9d2a4c17d1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/496bae3561d249c189479d6a7e4f55a4, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/beab0bce07e248949fdb836be6e09678, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/78425cc6e8404277a072a55cab1858c2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/1b30a440d70e4d1aa250cb2fca135f06, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3336a35df1a24b8893b30cf108dd4a6a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/109c5619a32546b8aba8c6a0e0f59a86, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/b20564ce97de4737bf1384812f320d10, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/731279275c824495922d71766cd4e11d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/2d4d94dbca244ae4b69c8ece44ce5aab, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d720e0d045f407db7f3a1de38554244, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bfdfec0c90f54c3e95d13187ed284540, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d2d31da0b31429dada36f4c1e8dfcba, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/322db0af61654b659231b63d4934f36b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/c3e211f838394c858ec8ae3cc7dd64f6, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/ee932b07a8c84dd2b6b54c0dc4642f33] to archive 2024-11-17T01:29:01,158 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:29:01,159 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/acb1eaea2a9d467f91512c8255c8f686 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/acb1eaea2a9d467f91512c8255c8f686 2024-11-17T01:29:01,159 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bc2993b5a88047f5a4a88c048178df29 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bc2993b5a88047f5a4a88c048178df29 2024-11-17T01:29:01,160 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d54752772b04563a771f700c1b2f41b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d54752772b04563a771f700c1b2f41b 2024-11-17T01:29:01,161 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/465f8e9e86e145e7858f810aef551e24 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/465f8e9e86e145e7858f810aef551e24 2024-11-17T01:29:01,161 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/d9391cb441c1457cb18349e49c2d2622 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/d9391cb441c1457cb18349e49c2d2622 2024-11-17T01:29:01,162 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/55c5e8894c584c6f979caf9d2a4c17d1 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/55c5e8894c584c6f979caf9d2a4c17d1 2024-11-17T01:29:01,162 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/496bae3561d249c189479d6a7e4f55a4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/496bae3561d249c189479d6a7e4f55a4 2024-11-17T01:29:01,163 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/beab0bce07e248949fdb836be6e09678 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/beab0bce07e248949fdb836be6e09678 2024-11-17T01:29:01,164 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/78425cc6e8404277a072a55cab1858c2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/78425cc6e8404277a072a55cab1858c2 2024-11-17T01:29:01,164 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/1b30a440d70e4d1aa250cb2fca135f06 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/1b30a440d70e4d1aa250cb2fca135f06 2024-11-17T01:29:01,165 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3336a35df1a24b8893b30cf108dd4a6a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3336a35df1a24b8893b30cf108dd4a6a 2024-11-17T01:29:01,165 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/109c5619a32546b8aba8c6a0e0f59a86 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/109c5619a32546b8aba8c6a0e0f59a86 2024-11-17T01:29:01,166 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/b20564ce97de4737bf1384812f320d10 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/b20564ce97de4737bf1384812f320d10 2024-11-17T01:29:01,167 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/731279275c824495922d71766cd4e11d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/731279275c824495922d71766cd4e11d 2024-11-17T01:29:01,167 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/2d4d94dbca244ae4b69c8ece44ce5aab to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/2d4d94dbca244ae4b69c8ece44ce5aab 2024-11-17T01:29:01,168 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d720e0d045f407db7f3a1de38554244 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d720e0d045f407db7f3a1de38554244 2024-11-17T01:29:01,169 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bfdfec0c90f54c3e95d13187ed284540 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/bfdfec0c90f54c3e95d13187ed284540 2024-11-17T01:29:01,169 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d2d31da0b31429dada36f4c1e8dfcba to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/3d2d31da0b31429dada36f4c1e8dfcba 2024-11-17T01:29:01,170 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/322db0af61654b659231b63d4934f36b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/322db0af61654b659231b63d4934f36b 2024-11-17T01:29:01,171 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/c3e211f838394c858ec8ae3cc7dd64f6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/c3e211f838394c858ec8ae3cc7dd64f6 2024-11-17T01:29:01,171 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/ee932b07a8c84dd2b6b54c0dc4642f33 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/ee932b07a8c84dd2b6b54c0dc4642f33 2024-11-17T01:29:01,172 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/2b3426dd65cf443aa90892b07f93386f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/be2ed07c5bec455d8e872f0e5f31dac7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/49ce8e7df9db473f82ab8acf243a7f39, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ddef502b33244f60a0cbeb3bc2fb4403, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0f18ad1799df4929924cc53bcf6abba9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/570d885ad4ad4958a999e832d54688fd, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/3054d9fc644142fb9843750d3e20403d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/5bfc38c64b8e42fda96393b53631fe47, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/33b461cd25f44a13abd9a555e933f598, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/f2f352b0b82541abbc4e35c5fd8f1c4c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0ee05632b99b4017b0ab4911a8d9a7b3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/e8fb390c7da5421c8f2412f45effde30, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/436f8cdb9a8e493e8f82656669e2777c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/581236ae6d9b4dbda1420ca656d234b0, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ad6ce9e838f5412c85de9d419ed5bd0e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/54eaccbccb484c5ca56f3038078662a1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/99a4b54d2fba4faa9f0c0ea0240c22c3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/18399df23cf640c092431dd7b65aefbf, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/70712603be2045b0a111a427d46289f7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c7c4b41b09e84fcab6b0618421c6601a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c3433fbc81c44d2eaea5bb0ce5142cf9] to archive 2024-11-17T01:29:01,173 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-17T01:29:01,174 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/2b3426dd65cf443aa90892b07f93386f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/2b3426dd65cf443aa90892b07f93386f 2024-11-17T01:29:01,174 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/be2ed07c5bec455d8e872f0e5f31dac7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/be2ed07c5bec455d8e872f0e5f31dac7 2024-11-17T01:29:01,175 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/49ce8e7df9db473f82ab8acf243a7f39 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/49ce8e7df9db473f82ab8acf243a7f39 2024-11-17T01:29:01,176 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ddef502b33244f60a0cbeb3bc2fb4403 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ddef502b33244f60a0cbeb3bc2fb4403 2024-11-17T01:29:01,176 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0f18ad1799df4929924cc53bcf6abba9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0f18ad1799df4929924cc53bcf6abba9 2024-11-17T01:29:01,177 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/570d885ad4ad4958a999e832d54688fd to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/570d885ad4ad4958a999e832d54688fd 2024-11-17T01:29:01,178 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/3054d9fc644142fb9843750d3e20403d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/3054d9fc644142fb9843750d3e20403d 2024-11-17T01:29:01,178 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/5bfc38c64b8e42fda96393b53631fe47 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/5bfc38c64b8e42fda96393b53631fe47 2024-11-17T01:29:01,179 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/33b461cd25f44a13abd9a555e933f598 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/33b461cd25f44a13abd9a555e933f598 2024-11-17T01:29:01,180 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/f2f352b0b82541abbc4e35c5fd8f1c4c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/f2f352b0b82541abbc4e35c5fd8f1c4c 2024-11-17T01:29:01,180 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0ee05632b99b4017b0ab4911a8d9a7b3 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/0ee05632b99b4017b0ab4911a8d9a7b3 2024-11-17T01:29:01,181 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/e8fb390c7da5421c8f2412f45effde30 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/e8fb390c7da5421c8f2412f45effde30 2024-11-17T01:29:01,182 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/436f8cdb9a8e493e8f82656669e2777c to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/436f8cdb9a8e493e8f82656669e2777c 2024-11-17T01:29:01,182 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/581236ae6d9b4dbda1420ca656d234b0 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/581236ae6d9b4dbda1420ca656d234b0 2024-11-17T01:29:01,183 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ad6ce9e838f5412c85de9d419ed5bd0e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/ad6ce9e838f5412c85de9d419ed5bd0e 2024-11-17T01:29:01,184 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/54eaccbccb484c5ca56f3038078662a1 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/54eaccbccb484c5ca56f3038078662a1 2024-11-17T01:29:01,184 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/99a4b54d2fba4faa9f0c0ea0240c22c3 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/99a4b54d2fba4faa9f0c0ea0240c22c3 2024-11-17T01:29:01,185 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/18399df23cf640c092431dd7b65aefbf to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/18399df23cf640c092431dd7b65aefbf 2024-11-17T01:29:01,186 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/70712603be2045b0a111a427d46289f7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/70712603be2045b0a111a427d46289f7 2024-11-17T01:29:01,186 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c7c4b41b09e84fcab6b0618421c6601a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c7c4b41b09e84fcab6b0618421c6601a 2024-11-17T01:29:01,187 DEBUG [StoreCloser-TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c3433fbc81c44d2eaea5bb0ce5142cf9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/c3433fbc81c44d2eaea5bb0ce5142cf9 2024-11-17T01:29:01,190 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/recovered.edits/324.seqid, newMaxSeqId=324, maxSeqId=1 2024-11-17T01:29:01,190 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b. 2024-11-17T01:29:01,190 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] regionserver.HRegion(1635): Region close journal for 54b0ceeaba8680637a0c22225fbae49b: 2024-11-17T01:29:01,191 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=147}] handler.UnassignRegionHandler(170): Closed 54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:29:01,192 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=146 updating hbase:meta row=54b0ceeaba8680637a0c22225fbae49b, regionState=CLOSED 2024-11-17T01:29:01,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-17T01:29:01,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseRegionProcedure 54b0ceeaba8680637a0c22225fbae49b, server=04f7e7347dc7,37721,1731806791503 in 1.4780 sec 2024-11-17T01:29:01,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=146, resume processing ppid=145 2024-11-17T01:29:01,194 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=145, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=54b0ceeaba8680637a0c22225fbae49b, UNASSIGN in 1.4830 sec 2024-11-17T01:29:01,195 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-17T01:29:01,195 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.4860 sec 2024-11-17T01:29:01,196 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806941196"}]},"ts":"1731806941196"} 
2024-11-17T01:29:01,197 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-17T01:29:01,232 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-17T01:29:01,234 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5660 sec 2024-11-17T01:29:01,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-17T01:29:01,776 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-17T01:29:01,777 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-17T01:29:01,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:01,781 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=148, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:01,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-17T01:29:01,783 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=148, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:01,786 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:29:01,789 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/recovered.edits] 2024-11-17T01:29:01,792 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3a63de59405344b7a14ae22b0649f4c3 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/3a63de59405344b7a14ae22b0649f4c3 2024-11-17T01:29:01,793 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/4e81fe8e1bdc45daa101f4be96e47536 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/4e81fe8e1bdc45daa101f4be96e47536 2024-11-17T01:29:01,795 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/b6fa50fdfbdc4d36b4716055d6f47316 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/A/b6fa50fdfbdc4d36b4716055d6f47316 2024-11-17T01:29:01,798 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/444fb483288f453dbeb9a315402a2eae to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/444fb483288f453dbeb9a315402a2eae 2024-11-17T01:29:01,799 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/87b572ab9af04a26b7d919579c0a463e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/87b572ab9af04a26b7d919579c0a463e 2024-11-17T01:29:01,800 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/f2950890010a46d0a0ab17ce9aa165e4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/B/f2950890010a46d0a0ab17ce9aa165e4 2024-11-17T01:29:01,801 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/39f5232de2904a9681dbc66c13744d99 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/39f5232de2904a9681dbc66c13744d99 2024-11-17T01:29:01,802 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/894f5da332d841339a3e1dce7e427b10 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/894f5da332d841339a3e1dce7e427b10 2024-11-17T01:29:01,802 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/e345eb2bb08f4f3b84f34d2e909756da to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/C/e345eb2bb08f4f3b84f34d2e909756da 2024-11-17T01:29:01,804 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/recovered.edits/324.seqid to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b/recovered.edits/324.seqid 2024-11-17T01:29:01,804 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/54b0ceeaba8680637a0c22225fbae49b 2024-11-17T01:29:01,804 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-17T01:29:01,806 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=148, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:01,807 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-17T01:29:01,808 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-17T01:29:01,809 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=148, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:01,809 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-17T01:29:01,809 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731806941809"}]},"ts":"9223372036854775807"} 2024-11-17T01:29:01,810 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-17T01:29:01,810 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 54b0ceeaba8680637a0c22225fbae49b, NAME => 'TestAcidGuarantees,,1731806915516.54b0ceeaba8680637a0c22225fbae49b.', STARTKEY => '', ENDKEY => ''}] 2024-11-17T01:29:01,810 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-17T01:29:01,810 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731806941810"}]},"ts":"9223372036854775807"} 2024-11-17T01:29:01,811 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-17T01:29:01,858 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=148, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:01,860 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 80 msec 2024-11-17T01:29:01,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-17T01:29:01,884 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-17T01:29:01,895 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=239 (was 239), OpenFileDescriptor=449 (was 453), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=298 (was 290) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=3844 (was 3852) 2024-11-17T01:29:01,906 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=239, OpenFileDescriptor=449, MaxFileDescriptor=1048576, SystemLoadAverage=298, ProcessCount=11, AvailableMemoryMB=3843 2024-11-17T01:29:01,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-17T01:29:01,908 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:29:01,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:01,910 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-17T01:29:01,910 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:01,910 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 149 2024-11-17T01:29:01,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-17T01:29:01,911 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-17T01:29:01,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742315_1491 (size=960) 2024-11-17T01:29:02,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-17T01:29:02,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-17T01:29:02,323 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50 2024-11-17T01:29:02,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742316_1492 (size=53) 2024-11-17T01:29:02,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-17T01:29:02,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:29:02,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 3f71bb4db1b15a8af73d9278bb8c8221, disabling compactions & flushes 2024-11-17T01:29:02,735 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:02,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:02,735 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. after waiting 0 ms 2024-11-17T01:29:02,736 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:02,736 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:02,736 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:02,738 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-17T01:29:02,739 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731806942738"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731806942738"}]},"ts":"1731806942738"} 2024-11-17T01:29:02,741 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-17T01:29:02,743 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-17T01:29:02,744 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806942743"}]},"ts":"1731806942743"} 2024-11-17T01:29:02,745 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-17T01:29:02,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, ASSIGN}] 2024-11-17T01:29:02,792 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, ASSIGN 2024-11-17T01:29:02,793 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=150, ppid=149, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, ASSIGN; state=OFFLINE, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=false 2024-11-17T01:29:02,944 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=3f71bb4db1b15a8af73d9278bb8c8221, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:02,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE; OpenRegionProcedure 3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:29:03,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-17T01:29:03,100 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:03,107 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:03,107 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:29:03,108 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:03,108 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:29:03,109 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:03,109 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:03,111 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:03,113 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:29:03,114 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f71bb4db1b15a8af73d9278bb8c8221 columnFamilyName A 2024-11-17T01:29:03,114 DEBUG [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:03,114 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(327): Store=3f71bb4db1b15a8af73d9278bb8c8221/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:29:03,115 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:03,116 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:29:03,116 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f71bb4db1b15a8af73d9278bb8c8221 columnFamilyName B 2024-11-17T01:29:03,116 DEBUG [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:03,116 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(327): Store=3f71bb4db1b15a8af73d9278bb8c8221/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:29:03,116 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:03,117 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:29:03,117 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f71bb4db1b15a8af73d9278bb8c8221 columnFamilyName C 2024-11-17T01:29:03,117 DEBUG [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:03,118 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(327): Store=3f71bb4db1b15a8af73d9278bb8c8221/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:29:03,118 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:03,119 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:03,119 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:03,120 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T01:29:03,121 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:03,122 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-17T01:29:03,123 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened 3f71bb4db1b15a8af73d9278bb8c8221; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62339754, jitterRate=-0.0710652768611908}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:29:03,123 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:03,124 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., pid=151, masterSystemTime=1731806943100 2024-11-17T01:29:03,125 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:03,125 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:03,126 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=150 updating hbase:meta row=3f71bb4db1b15a8af73d9278bb8c8221, regionState=OPEN, openSeqNum=2, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:03,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-17T01:29:03,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; OpenRegionProcedure 3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 in 180 msec 2024-11-17T01:29:03,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-11-17T01:29:03,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, ASSIGN in 338 msec 2024-11-17T01:29:03,130 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-17T01:29:03,130 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806943130"}]},"ts":"1731806943130"} 2024-11-17T01:29:03,131 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-17T01:29:03,175 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=149, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-17T01:29:03,178 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2670 sec 2024-11-17T01:29:04,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=149 2024-11-17T01:29:04,022 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 149 completed 2024-11-17T01:29:04,025 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1cbd2497 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17e5a47d 2024-11-17T01:29:04,084 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cbfd84f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:04,088 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:04,091 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47422, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:04,094 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-17T01:29:04,095 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45206, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-17T01:29:04,097 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-17T01:29:04,097 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-17T01:29:04,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:04,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742317_1493 (size=996) 2024-11-17T01:29:04,516 DEBUG [PEWorker-3 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-17T01:29:04,516 INFO [PEWorker-3 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-17T01:29:04,522 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-17T01:29:04,526 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, REOPEN/MOVE}] 2024-11-17T01:29:04,527 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, REOPEN/MOVE 2024-11-17T01:29:04,529 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=3f71bb4db1b15a8af73d9278bb8c8221, regionState=CLOSING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:04,530 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-17T01:29:04,530 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE; CloseRegionProcedure 3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:29:04,681 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:04,682 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(124): Close 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:04,683 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-17T01:29:04,683 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1681): Closing 3f71bb4db1b15a8af73d9278bb8c8221, disabling compactions & flushes 2024-11-17T01:29:04,683 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:04,683 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:04,683 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. after waiting 0 ms 2024-11-17T01:29:04,683 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:04,690 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-17T01:29:04,691 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:04,691 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegion(1635): Region close journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:04,691 WARN [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] regionserver.HRegionServer(3786): Not adding moved region record: 3f71bb4db1b15a8af73d9278bb8c8221 to self. 2024-11-17T01:29:04,692 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=155}] handler.UnassignRegionHandler(170): Closed 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:04,693 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=3f71bb4db1b15a8af73d9278bb8c8221, regionState=CLOSED 2024-11-17T01:29:04,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-17T01:29:04,695 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; CloseRegionProcedure 3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 in 164 msec 2024-11-17T01:29:04,696 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=154, ppid=153, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, REOPEN/MOVE; state=CLOSED, location=04f7e7347dc7,37721,1731806791503; forceNewPlan=false, retain=true 2024-11-17T01:29:04,847 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=3f71bb4db1b15a8af73d9278bb8c8221, regionState=OPENING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:04,849 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=154, state=RUNNABLE; OpenRegionProcedure 3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:29:05,003 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,006 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:05,006 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7285): Opening region: {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} 2024-11-17T01:29:05,007 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,007 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-17T01:29:05,007 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7327): checking encryption for 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,007 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(7330): checking classloading for 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,009 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,010 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:29:05,011 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f71bb4db1b15a8af73d9278bb8c8221 columnFamilyName A 2024-11-17T01:29:05,013 DEBUG [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:05,014 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(327): Store=3f71bb4db1b15a8af73d9278bb8c8221/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:29:05,014 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,015 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:29:05,016 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f71bb4db1b15a8af73d9278bb8c8221 columnFamilyName B 2024-11-17T01:29:05,016 DEBUG [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:05,017 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(327): Store=3f71bb4db1b15a8af73d9278bb8c8221/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:29:05,017 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,017 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-17T01:29:05,018 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3f71bb4db1b15a8af73d9278bb8c8221 columnFamilyName C 2024-11-17T01:29:05,018 DEBUG [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:05,018 INFO [StoreOpener-3f71bb4db1b15a8af73d9278bb8c8221-1 {}] regionserver.HStore(327): Store=3f71bb4db1b15a8af73d9278bb8c8221/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-17T01:29:05,019 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,019 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,021 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,023 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-17T01:29:05,025 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1085): writing seq id for 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,026 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1102): Opened 3f71bb4db1b15a8af73d9278bb8c8221; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71068043, jitterRate=0.05899636447429657}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-17T01:29:05,027 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegion(1001): Region open journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:05,027 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., pid=156, masterSystemTime=1731806945003 2024-11-17T01:29:05,029 DEBUG [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,029 INFO [RS_OPEN_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_OPEN_REGION, pid=156}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:05,030 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=154 updating hbase:meta row=3f71bb4db1b15a8af73d9278bb8c8221, regionState=OPEN, openSeqNum=5, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=154 2024-11-17T01:29:05,033 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=154, state=SUCCESS; OpenRegionProcedure 3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 in 182 msec 2024-11-17T01:29:05,034 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=154, resume processing ppid=153 2024-11-17T01:29:05,035 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=153, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, REOPEN/MOVE in 507 msec 2024-11-17T01:29:05,037 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-17T01:29:05,037 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 513 msec 2024-11-17T01:29:05,039 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 940 msec 2024-11-17T01:29:05,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-11-17T01:29:05,042 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5765d46a to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d9954b7 2024-11-17T01:29:05,084 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3fb684eb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,086 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ac53e79 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d5efb7a 2024-11-17T01:29:05,099 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@644b7e6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,100 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x05bc9c3e to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fc332d8 2024-11-17T01:29:05,107 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c9b5141, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,108 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7181df3b 
to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17327621 2024-11-17T01:29:05,115 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11a52cdf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,116 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11030ef5 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1584f18a 2024-11-17T01:29:05,124 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2d7fe431, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,124 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b914bf4 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91d72db 2024-11-17T01:29:05,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58971172, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,133 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3f6a59e4 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5d836f78 2024-11-17T01:29:05,141 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d7fe93b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,141 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x150e08ed to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53305d9b 2024-11-17T01:29:05,149 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11c440f7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,149 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3a3b66d3 to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6bb6288a 2024-11-17T01:29:05,157 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58460ef3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,158 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x5cfdf76c to 127.0.0.1:63898 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6556601 2024-11-17T01:29:05,165 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e8cd1ae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-17T01:29:05,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:29:05,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees 2024-11-17T01:29:05,169 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:29:05,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-17T01:29:05,169 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=157, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:29:05,169 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:29:05,175 DEBUG [hconnection-0x3728468b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,175 DEBUG [hconnection-0x29968d0f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,176 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47434, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,176 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47426, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,176 DEBUG [hconnection-0x2d56e567-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,176 DEBUG [hconnection-0x5eb11946-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,176 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,177 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,177 DEBUG [hconnection-0xcad36d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,178 DEBUG [hconnection-0x1e386e06-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,178 DEBUG [hconnection-0x5512c8e1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,178 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47462, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,179 DEBUG [hconnection-0x5e9838f1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,179 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47488, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,179 DEBUG [hconnection-0x11d83f6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,179 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47474, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,180 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47494, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,180 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47496, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,183 DEBUG [hconnection-0xc3edd28-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-17T01:29:05,184 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47506, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-17T01:29:05,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,185 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-17T01:29:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:05,186 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:05,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807005197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807005197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,200 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807005197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807005198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807005199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,216 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117902649763f174a6584261c33c87af170_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806945182/Put/seqid=0 2024-11-17T01:29:05,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742318_1494 (size=12154) 2024-11-17T01:29:05,228 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:05,230 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117902649763f174a6584261c33c87af170_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117902649763f174a6584261c33c87af170_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:05,231 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/11f6e67a6ba14799bb71bc07049dcc2c, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:05,231 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/11f6e67a6ba14799bb71bc07049dcc2c is 175, key is test_row_0/A:col10/1731806945182/Put/seqid=0 2024-11-17T01:29:05,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742319_1495 (size=30955) 2024-11-17T01:29:05,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-17T01:29:05,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size 
limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807005301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807005301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807005301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807005301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807005301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-17T01:29:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,321 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:05,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-17T01:29:05,473 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,473 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-17T01:29:05,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:05,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807005503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807005503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807005503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,505 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807005503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,506 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807005504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,625 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,626 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-17T01:29:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,626 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:05,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,651 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=18, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/11f6e67a6ba14799bb71bc07049dcc2c 2024-11-17T01:29:05,679 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/e8d440bdcd3741458629e36322342c44 is 50, key is test_row_0/B:col10/1731806945182/Put/seqid=0 2024-11-17T01:29:05,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742320_1496 (size=12001) 2024-11-17T01:29:05,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-17T01:29:05,778 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,778 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-17T01:29:05,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:05,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,778 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,778 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807005805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807005806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807005806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807005806, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:05,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807005808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,930 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:05,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-17T01:29:05,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:05,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:05,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:05,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:05,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:06,083 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:06,083 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-17T01:29:06,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:06,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:06,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:06,083 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/e8d440bdcd3741458629e36322342c44 2024-11-17T01:29:06,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:06,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:06,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:06,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/10714605d6b245e1a44dfd4a9321a134 is 50, key is test_row_0/C:col10/1731806945182/Put/seqid=0 2024-11-17T01:29:06,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742321_1497 (size=12001) 2024-11-17T01:29:06,235 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:06,235 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-17T01:29:06,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:06,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:06,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:06,235 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:06,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:06,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:06,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-17T01:29:06,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:06,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807006308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:06,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807006309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:06,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807006309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:06,312 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:06,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807006311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:06,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:06,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807006314, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:06,387 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:06,387 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-17T01:29:06,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:06,387 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:06,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:06,388 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] handler.RSProcedureHandler(58): pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:06,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=158 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:06,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=158 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:06,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/10714605d6b245e1a44dfd4a9321a134 2024-11-17T01:29:06,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/11f6e67a6ba14799bb71bc07049dcc2c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/11f6e67a6ba14799bb71bc07049dcc2c 2024-11-17T01:29:06,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/11f6e67a6ba14799bb71bc07049dcc2c, entries=150, sequenceid=18, filesize=30.2 K 2024-11-17T01:29:06,514 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/e8d440bdcd3741458629e36322342c44 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e8d440bdcd3741458629e36322342c44 2024-11-17T01:29:06,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e8d440bdcd3741458629e36322342c44, entries=150, sequenceid=18, filesize=11.7 K 2024-11-17T01:29:06,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/10714605d6b245e1a44dfd4a9321a134 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/10714605d6b245e1a44dfd4a9321a134 2024-11-17T01:29:06,524 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/10714605d6b245e1a44dfd4a9321a134, entries=150, sequenceid=18, filesize=11.7 K 2024-11-17T01:29:06,524 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1339ms, sequenceid=18, compaction requested=false 2024-11-17T01:29:06,524 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:06,539 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 
04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:06,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=158 2024-11-17T01:29:06,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:06,540 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-17T01:29:06,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:06,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:06,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:06,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:06,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:06,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:06,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111768912b4298594c58a49cbe9541d56c7c_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806945193/Put/seqid=0 2024-11-17T01:29:06,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742322_1498 (size=12154) 2024-11-17T01:29:06,708 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-17T01:29:06,954 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:06,957 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111768912b4298594c58a49cbe9541d56c7c_3f71bb4db1b15a8af73d9278bb8c8221 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111768912b4298594c58a49cbe9541d56c7c_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:06,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/a6f905cce5594080b78ec93438384b26, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:06,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/a6f905cce5594080b78ec93438384b26 is 175, key is test_row_0/A:col10/1731806945193/Put/seqid=0 2024-11-17T01:29:06,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742323_1499 (size=30955) 2024-11-17T01:29:07,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157 2024-11-17T01:29:07,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:07,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:07,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807007320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807007320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807007321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807007321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807007321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,362 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/a6f905cce5594080b78ec93438384b26 2024-11-17T01:29:07,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/d358d4eb1cf644d4b871012b080fbe5e is 50, key is test_row_0/B:col10/1731806945193/Put/seqid=0 2024-11-17T01:29:07,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742324_1500 (size=12001) 2024-11-17T01:29:07,424 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807007423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807007423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807007424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807007424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807007424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807007625, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807007626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807007627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807007627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,629 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807007627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,781 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/d358d4eb1cf644d4b871012b080fbe5e 2024-11-17T01:29:07,787 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/1735df6551304dcca4540fe0cb0ddfb2 is 50, key is test_row_0/C:col10/1731806945193/Put/seqid=0 2024-11-17T01:29:07,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742325_1501 (size=12001) 2024-11-17T01:29:07,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807007927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,929 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807007928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807007930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807007930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:07,931 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:07,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807007930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,190 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/1735df6551304dcca4540fe0cb0ddfb2 2024-11-17T01:29:08,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/a6f905cce5594080b78ec93438384b26 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/a6f905cce5594080b78ec93438384b26 2024-11-17T01:29:08,196 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/a6f905cce5594080b78ec93438384b26, entries=150, sequenceid=40, filesize=30.2 K 2024-11-17T01:29:08,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/d358d4eb1cf644d4b871012b080fbe5e as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/d358d4eb1cf644d4b871012b080fbe5e
2024-11-17T01:29:08,198 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/d358d4eb1cf644d4b871012b080fbe5e, entries=150, sequenceid=40, filesize=11.7 K
2024-11-17T01:29:08,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/1735df6551304dcca4540fe0cb0ddfb2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1735df6551304dcca4540fe0cb0ddfb2
2024-11-17T01:29:08,203 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1735df6551304dcca4540fe0cb0ddfb2, entries=150, sequenceid=40, filesize=11.7 K
2024-11-17T01:29:08,203 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1663ms, sequenceid=40, compaction requested=false
2024-11-17T01:29:08,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221:
2024-11-17T01:29:08,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.
2024-11-17T01:29:08,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=158}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=158
2024-11-17T01:29:08,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=158
2024-11-17T01:29:08,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=157
2024-11-17T01:29:08,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.0360 sec
2024-11-17T01:29:08,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=157, table=TestAcidGuarantees in 3.0390 sec
2024-11-17T01:29:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:08,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,435 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-17T01:29:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:08,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:08,436 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:08,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,442 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117482418fd8ad3454ea55741d305358b40_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806948434/Put/seqid=0 2024-11-17T01:29:08,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742326_1502 (size=12154) 2024-11-17T01:29:08,451 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807008449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807008450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807008450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,453 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807008451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807008451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807008552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807008554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807008554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,555 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807008554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807008555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807008756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,757 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807008757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,759 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807008757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807008757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:08,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807008758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:08,846 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:08,849 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117482418fd8ad3454ea55741d305358b40_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117482418fd8ad3454ea55741d305358b40_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:08,850 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/1bc25b13ede44b97b13f82534bea2c79, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:08,851 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/1bc25b13ede44b97b13f82534bea2c79 is 175, key is test_row_0/A:col10/1731806948434/Put/seqid=0 2024-11-17T01:29:08,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742327_1503 (size=30951) 2024-11-17T01:29:08,855 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/1bc25b13ede44b97b13f82534bea2c79 2024-11-17T01:29:08,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/ce85bffea25e47f4a9733671cb214d9d is 50, key is test_row_0/B:col10/1731806948434/Put/seqid=0 2024-11-17T01:29:08,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742328_1504 (size=9657) 2024-11-17T01:29:09,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:09,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807009059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:09,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807009059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:09,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807009059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:09,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807009061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-17T01:29:09,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807009061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503
2024-11-17T01:29:09,263 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/ce85bffea25e47f4a9733671cb214d9d
2024-11-17T01:29:09,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/36783b211a274dc0becbf1db87535d0f is 50, key is test_row_0/C:col10/1731806948434/Put/seqid=0
2024-11-17T01:29:09,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742329_1505 (size=9657)
2024-11-17T01:29:09,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=157
2024-11-17T01:29:09,273 INFO [Thread-2220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 157 completed
2024-11-17T01:29:09,273 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-17T01:29:09,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees
2024-11-17T01:29:09,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159
2024-11-17T01:29:09,274 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-17T01:29:09,275 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-17T01:29:09,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-17T01:29:09,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159
2024-11-17T01:29:09,426 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:29:09,426 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160
2024-11-17T01:29:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.
2024-11-17T01:29:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing
2024-11-17T01:29:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.
2024-11-17T01:29:09,427 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160
java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:29:09,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160
java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''}
at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:09,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:09,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:09,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807009562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,565 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:09,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807009563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:09,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807009564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,566 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:09,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807009565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:09,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807009565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-17T01:29:09,578 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,579 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-17T01:29:09,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:09,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:09,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:09,579 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:09,579 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:09,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:09,675 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/36783b211a274dc0becbf1db87535d0f 2024-11-17T01:29:09,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/1bc25b13ede44b97b13f82534bea2c79 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1bc25b13ede44b97b13f82534bea2c79 2024-11-17T01:29:09,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1bc25b13ede44b97b13f82534bea2c79, entries=150, sequenceid=55, filesize=30.2 K 2024-11-17T01:29:09,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/ce85bffea25e47f4a9733671cb214d9d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/ce85bffea25e47f4a9733671cb214d9d 2024-11-17T01:29:09,683 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/ce85bffea25e47f4a9733671cb214d9d, entries=100, sequenceid=55, 
filesize=9.4 K 2024-11-17T01:29:09,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/36783b211a274dc0becbf1db87535d0f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/36783b211a274dc0becbf1db87535d0f 2024-11-17T01:29:09,686 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/36783b211a274dc0becbf1db87535d0f, entries=100, sequenceid=55, filesize=9.4 K 2024-11-17T01:29:09,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1252ms, sequenceid=55, compaction requested=true 2024-11-17T01:29:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:29:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:09,687 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:29:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:29:09,687 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:09,687 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:09,687 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92861 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:09,687 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:09,687 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 
3f71bb4db1b15a8af73d9278bb8c8221/A is initiating minor compaction (all files) 2024-11-17T01:29:09,687 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/B is initiating minor compaction (all files) 2024-11-17T01:29:09,687 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/A in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:09,687 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/B in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:09,688 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/11f6e67a6ba14799bb71bc07049dcc2c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/a6f905cce5594080b78ec93438384b26, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1bc25b13ede44b97b13f82534bea2c79] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=90.7 K 2024-11-17T01:29:09,688 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e8d440bdcd3741458629e36322342c44, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/d358d4eb1cf644d4b871012b080fbe5e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/ce85bffea25e47f4a9733671cb214d9d] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=32.9 K 2024-11-17T01:29:09,688 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:09,688 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/11f6e67a6ba14799bb71bc07049dcc2c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/a6f905cce5594080b78ec93438384b26, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1bc25b13ede44b97b13f82534bea2c79] 2024-11-17T01:29:09,688 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e8d440bdcd3741458629e36322342c44, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1731806945182 2024-11-17T01:29:09,688 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 11f6e67a6ba14799bb71bc07049dcc2c, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1731806945182 2024-11-17T01:29:09,688 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting d358d4eb1cf644d4b871012b080fbe5e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731806945193 2024-11-17T01:29:09,688 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting a6f905cce5594080b78ec93438384b26, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731806945193 2024-11-17T01:29:09,688 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bc25b13ede44b97b13f82534bea2c79, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806947319 2024-11-17T01:29:09,688 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting ce85bffea25e47f4a9733671cb214d9d, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806947319 2024-11-17T01:29:09,692 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:09,693 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#B#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:09,694 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/4fcdb6371111463183916eae7c329892 is 50, key is test_row_0/B:col10/1731806948434/Put/seqid=0 2024-11-17T01:29:09,694 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241117d279c5be9ff947c8ac7a22a41bef1a9b_3f71bb4db1b15a8af73d9278bb8c8221 store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:09,696 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241117d279c5be9ff947c8ac7a22a41bef1a9b_3f71bb4db1b15a8af73d9278bb8c8221, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:09,696 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117d279c5be9ff947c8ac7a22a41bef1a9b_3f71bb4db1b15a8af73d9278bb8c8221 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:09,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742330_1506 (size=12104) 2024-11-17T01:29:09,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742331_1507 (size=4469) 2024-11-17T01:29:09,731 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:09,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-17T01:29:09,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:09,731 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-17T01:29:09,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:09,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:09,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:09,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:09,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:09,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:09,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411172ba75c23183f49998bf682d374d4d6f9_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806948450/Put/seqid=0 2024-11-17T01:29:09,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742332_1508 (size=12154) 2024-11-17T01:29:09,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-17T01:29:10,101 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#A#compaction#430 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:10,102 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/12f23ea68d3c47b985264408e224ae99 is 175, key is test_row_0/A:col10/1731806948434/Put/seqid=0 2024-11-17T01:29:10,103 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/4fcdb6371111463183916eae7c329892 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/4fcdb6371111463183916eae7c329892 2024-11-17T01:29:10,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742333_1509 (size=31165) 2024-11-17T01:29:10,106 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/B of 3f71bb4db1b15a8af73d9278bb8c8221 into 4fcdb6371111463183916eae7c329892(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:10,106 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:10,106 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/B, priority=13, startTime=1731806949687; duration=0sec 2024-11-17T01:29:10,106 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:10,106 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:B 2024-11-17T01:29:10,106 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:10,107 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:10,107 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/C is initiating minor compaction (all files) 2024-11-17T01:29:10,107 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/C in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:10,107 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/10714605d6b245e1a44dfd4a9321a134, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1735df6551304dcca4540fe0cb0ddfb2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/36783b211a274dc0becbf1db87535d0f] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=32.9 K 2024-11-17T01:29:10,107 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 10714605d6b245e1a44dfd4a9321a134, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=18, earliestPutTs=1731806945182 2024-11-17T01:29:10,108 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 1735df6551304dcca4540fe0cb0ddfb2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731806945193 2024-11-17T01:29:10,108 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 36783b211a274dc0becbf1db87535d0f, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806947319 2024-11-17T01:29:10,112 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#C#compaction#432 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:10,113 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/ceee3466e3ab4951a86a09e9bed08e5f is 50, key is test_row_0/C:col10/1731806948434/Put/seqid=0 2024-11-17T01:29:10,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742334_1510 (size=12104) 2024-11-17T01:29:10,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:10,144 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411172ba75c23183f49998bf682d374d4d6f9_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172ba75c23183f49998bf682d374d4d6f9_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:10,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/02ac3ca2b421463ca9e0b5a473938967, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:10,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/02ac3ca2b421463ca9e0b5a473938967 is 175, key is test_row_0/A:col10/1731806948450/Put/seqid=0 2024-11-17T01:29:10,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742335_1511 (size=30955) 2024-11-17T01:29:10,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-17T01:29:10,528 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/12f23ea68d3c47b985264408e224ae99 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/12f23ea68d3c47b985264408e224ae99 2024-11-17T01:29:10,528 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/ceee3466e3ab4951a86a09e9bed08e5f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/ceee3466e3ab4951a86a09e9bed08e5f 2024-11-17T01:29:10,531 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/A of 3f71bb4db1b15a8af73d9278bb8c8221 into 12f23ea68d3c47b985264408e224ae99(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:10,531 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/C of 3f71bb4db1b15a8af73d9278bb8c8221 into ceee3466e3ab4951a86a09e9bed08e5f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:10,531 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:10,531 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:10,532 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/A, priority=13, startTime=1731806949687; duration=0sec 2024-11-17T01:29:10,532 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/C, priority=13, startTime=1731806949687; duration=0sec 2024-11-17T01:29:10,532 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:10,532 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:A 2024-11-17T01:29:10,532 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:10,532 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:C 2024-11-17T01:29:10,548 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=76, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/02ac3ca2b421463ca9e0b5a473938967 2024-11-17T01:29:10,554 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/f4a11c6e21ed4d09a49b5f2fcc0f256e is 50, key is test_row_0/B:col10/1731806948450/Put/seqid=0 2024-11-17T01:29:10,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742336_1512 (size=12001) 2024-11-17T01:29:10,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:10,566 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:10,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807010575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807010576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,579 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807010577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807010577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807010578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807010679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807010679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807010680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,681 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807010680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807010681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807010883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807010883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,884 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807010883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,885 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807010883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:10,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807010884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:10,963 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/f4a11c6e21ed4d09a49b5f2fcc0f256e 2024-11-17T01:29:10,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/5eda42ba3cb3434eb143bd6e57ee8b60 is 50, key is test_row_0/C:col10/1731806948450/Put/seqid=0 2024-11-17T01:29:10,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742337_1513 (size=12001) 2024-11-17T01:29:11,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807011185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807011185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807011186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807011187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807011187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,372 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/5eda42ba3cb3434eb143bd6e57ee8b60 2024-11-17T01:29:11,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/02ac3ca2b421463ca9e0b5a473938967 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/02ac3ca2b421463ca9e0b5a473938967 2024-11-17T01:29:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-17T01:29:11,378 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/02ac3ca2b421463ca9e0b5a473938967, entries=150, sequenceid=76, filesize=30.2 K 2024-11-17T01:29:11,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/f4a11c6e21ed4d09a49b5f2fcc0f256e as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f4a11c6e21ed4d09a49b5f2fcc0f256e 2024-11-17T01:29:11,380 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f4a11c6e21ed4d09a49b5f2fcc0f256e, entries=150, sequenceid=76, filesize=11.7 K 2024-11-17T01:29:11,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/5eda42ba3cb3434eb143bd6e57ee8b60 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/5eda42ba3cb3434eb143bd6e57ee8b60 2024-11-17T01:29:11,384 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/5eda42ba3cb3434eb143bd6e57ee8b60, entries=150, sequenceid=76, filesize=11.7 K 2024-11-17T01:29:11,384 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1653ms, sequenceid=76, compaction requested=false 2024-11-17T01:29:11,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:11,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:11,384 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-17T01:29:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-17T01:29:11,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-17T01:29:11,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1100 sec 2024-11-17T01:29:11,387 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 2.1140 sec 2024-11-17T01:29:11,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:11,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-17T01:29:11,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:11,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:11,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:11,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:11,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:11,689 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:11,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411172e2be5597cb74bcb9930ad9afbed2318_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806950577/Put/seqid=0 2024-11-17T01:29:11,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742338_1514 (size=12154) 2024-11-17T01:29:11,702 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807011700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807011700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,703 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807011701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807011701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807011702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,805 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807011803, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807011804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,807 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807011804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,807 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807011804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:11,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:11,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807011805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807012007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,009 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807012008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807012008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807012008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807012009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,098 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:12,100 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411172e2be5597cb74bcb9930ad9afbed2318_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172e2be5597cb74bcb9930ad9afbed2318_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:12,101 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/f2321d5507894ea38d1f3dc30dd23a29, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:12,101 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/f2321d5507894ea38d1f3dc30dd23a29 is 175, key is test_row_0/A:col10/1731806950577/Put/seqid=0 2024-11-17T01:29:12,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742339_1515 (size=30955) 2024-11-17T01:29:12,312 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807012310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807012311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807012311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807012312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807012312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,506 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/f2321d5507894ea38d1f3dc30dd23a29 2024-11-17T01:29:12,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/dae306d255764bb2857888eb012f05f2 is 50, key is test_row_0/B:col10/1731806950577/Put/seqid=0 2024-11-17T01:29:12,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742340_1516 (size=12001) 2024-11-17T01:29:12,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807012813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807012816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807012816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,819 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807012817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,820 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807012817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:12,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/dae306d255764bb2857888eb012f05f2 2024-11-17T01:29:12,919 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/713eaa5217f94dfeab4c4c9b717ca345 is 50, key is test_row_0/C:col10/1731806950577/Put/seqid=0 2024-11-17T01:29:12,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742341_1517 (size=12001) 2024-11-17T01:29:13,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/713eaa5217f94dfeab4c4c9b717ca345 2024-11-17T01:29:13,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/f2321d5507894ea38d1f3dc30dd23a29 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/f2321d5507894ea38d1f3dc30dd23a29 2024-11-17T01:29:13,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/f2321d5507894ea38d1f3dc30dd23a29, entries=150, sequenceid=95, filesize=30.2 K 2024-11-17T01:29:13,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/dae306d255764bb2857888eb012f05f2 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/dae306d255764bb2857888eb012f05f2 2024-11-17T01:29:13,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/dae306d255764bb2857888eb012f05f2, entries=150, sequenceid=95, filesize=11.7 K 2024-11-17T01:29:13,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/713eaa5217f94dfeab4c4c9b717ca345 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/713eaa5217f94dfeab4c4c9b717ca345 2024-11-17T01:29:13,333 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/713eaa5217f94dfeab4c4c9b717ca345, entries=150, sequenceid=95, filesize=11.7 K 2024-11-17T01:29:13,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1646ms, sequenceid=95, compaction requested=true 2024-11-17T01:29:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:29:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:13,334 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:29:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:13,334 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:13,334 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:29:13,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:13,335 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93075 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:13,335 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/A is initiating minor compaction (all files) 2024-11-17T01:29:13,336 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/A in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:13,336 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/12f23ea68d3c47b985264408e224ae99, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/02ac3ca2b421463ca9e0b5a473938967, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/f2321d5507894ea38d1f3dc30dd23a29] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=90.9 K 2024-11-17T01:29:13,336 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:13,336 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/12f23ea68d3c47b985264408e224ae99, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/02ac3ca2b421463ca9e0b5a473938967, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/f2321d5507894ea38d1f3dc30dd23a29] 2024-11-17T01:29:13,336 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 12f23ea68d3c47b985264408e224ae99, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806945198 2024-11-17T01:29:13,336 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 02ac3ca2b421463ca9e0b5a473938967, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731806948445 2024-11-17T01:29:13,336 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting f2321d5507894ea38d1f3dc30dd23a29, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731806950575 2024-11-17T01:29:13,339 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:13,339 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/B is initiating minor compaction (all files) 2024-11-17T01:29:13,339 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/B in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
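The compaction records above show ExploringCompactionPolicy selecting three eligible HFiles per store ("3 eligible, 16 blocking") and rewriting them in one minor compaction. The snippet below is only an illustrative sketch of the standard configuration keys that govern that behavior; the class and method names are made up for the example, the values are assumptions chosen to mirror what the log suggests, and this is not the configuration actually used by the TestAcidGuarantees run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  // Hypothetical helper showing the knobs behind the selection logged above.
  public static Configuration compactionConf() {
    // Start from the default HBase configuration.
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is
    // considered; the log shows exactly 3 files being selected per store.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on files rewritten in a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Store-file count at which further flushes are blocked; matches the
    // "16 blocking" figure in the SortedCompactionPolicy lines above.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}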
2024-11-17T01:29:13,339 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/4fcdb6371111463183916eae7c329892, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f4a11c6e21ed4d09a49b5f2fcc0f256e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/dae306d255764bb2857888eb012f05f2] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=35.3 K 2024-11-17T01:29:13,339 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 4fcdb6371111463183916eae7c329892, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806945198 2024-11-17T01:29:13,339 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f4a11c6e21ed4d09a49b5f2fcc0f256e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731806948445 2024-11-17T01:29:13,340 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting dae306d255764bb2857888eb012f05f2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731806950575 2024-11-17T01:29:13,340 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:13,341 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241117bbd55f9ec71d42c585cb520beccc163a_3f71bb4db1b15a8af73d9278bb8c8221 store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:13,343 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241117bbd55f9ec71d42c585cb520beccc163a_3f71bb4db1b15a8af73d9278bb8c8221, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:13,343 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117bbd55f9ec71d42c585cb520beccc163a_3f71bb4db1b15a8af73d9278bb8c8221 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:13,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742342_1518 (size=4469) 2024-11-17T01:29:13,347 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#A#compaction#438 average throughput is 4.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:13,347 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/daf4b569cc504af6913c3406583989e1 is 175, key is test_row_0/A:col10/1731806950577/Put/seqid=0 2024-11-17T01:29:13,348 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#B#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:13,348 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/5817f111a15f4179b42763bb59aceae4 is 50, key is test_row_0/B:col10/1731806950577/Put/seqid=0 2024-11-17T01:29:13,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742343_1519 (size=31161) 2024-11-17T01:29:13,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742344_1520 (size=12207) 2024-11-17T01:29:13,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-17T01:29:13,378 INFO [Thread-2220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-17T01:29:13,379 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:29:13,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-17T01:29:13,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-17T01:29:13,380 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:29:13,380 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:29:13,380 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:29:13,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-17T01:29:13,532 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,532 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-17T01:29:13,532 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:13,532 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-17T01:29:13,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:13,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:13,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:13,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:13,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:13,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:13,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117ee2d6c332b9a42c383a428e669ebbac2_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806951701/Put/seqid=0 2024-11-17T01:29:13,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742345_1521 (size=12154) 2024-11-17T01:29:13,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-17T01:29:13,756 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/daf4b569cc504af6913c3406583989e1 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/daf4b569cc504af6913c3406583989e1 2024-11-17T01:29:13,759 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/A of 3f71bb4db1b15a8af73d9278bb8c8221 into daf4b569cc504af6913c3406583989e1(size=30.4 K), total 
size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:13,759 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:13,759 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/A, priority=13, startTime=1731806953334; duration=0sec 2024-11-17T01:29:13,759 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:13,759 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:A 2024-11-17T01:29:13,760 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:13,761 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:13,761 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/C is initiating minor compaction (all files) 2024-11-17T01:29:13,761 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/C in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
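The RegionTooBusyException records that dominate this section come from HRegion.checkResources refusing new writes while the region's memstore is above its blocking size, which is the memstore flush size multiplied by the block multiplier. The reported limit of 512.0 K would be consistent with a 128 KB flush size and the default multiplier of 4, though the exact values this test configures are an assumption here. The sketch below only illustrates that relationship and how a caller might wait and retry; it is not the test's own code, the class and helper names are hypothetical, and note that the stock HBase client is generally expected to retry this exception on its own.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

public class MemstoreBackpressureSketch {
  // Assumed values: a 128 KB flush size with the default multiplier of 4
  // yields the 512 K blocking limit reported in the log above.
  static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }

  // Hypothetical helper: retry a put with simple backoff when the region
  // reports it is over its memstore limit, giving the flush and compaction
  // seen in this log time to drain the memstore.
  static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
    long delayMs = 100;
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        Thread.sleep(delayMs);
        delayMs = Math.min(delayMs * 2, 5_000);
      }
    }
    throw new IOException("region still too busy after retries");
  }
}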
2024-11-17T01:29:13,761 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/ceee3466e3ab4951a86a09e9bed08e5f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/5eda42ba3cb3434eb143bd6e57ee8b60, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/713eaa5217f94dfeab4c4c9b717ca345] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=35.3 K 2024-11-17T01:29:13,761 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ceee3466e3ab4951a86a09e9bed08e5f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731806945198 2024-11-17T01:29:13,761 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5eda42ba3cb3434eb143bd6e57ee8b60, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1731806948445 2024-11-17T01:29:13,762 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 713eaa5217f94dfeab4c4c9b717ca345, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731806950575 2024-11-17T01:29:13,764 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/5817f111a15f4179b42763bb59aceae4 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5817f111a15f4179b42763bb59aceae4 2024-11-17T01:29:13,767 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/B of 3f71bb4db1b15a8af73d9278bb8c8221 into 5817f111a15f4179b42763bb59aceae4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:13,768 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:13,768 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#C#compaction#441 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:13,768 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/B, priority=13, startTime=1731806953334; duration=0sec 2024-11-17T01:29:13,768 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:13,768 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:B 2024-11-17T01:29:13,768 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/c953c60bb5164f4f9e8ada2ae4248be7 is 50, key is test_row_0/C:col10/1731806950577/Put/seqid=0 2024-11-17T01:29:13,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742346_1522 (size=12207) 2024-11-17T01:29:13,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:13,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:13,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807013833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807013833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807013836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807013837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807013837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807013938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,939 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807013938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:13,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807013939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807013941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:13,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807013942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:13,944 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117ee2d6c332b9a42c383a428e669ebbac2_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117ee2d6c332b9a42c383a428e669ebbac2_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:13,945 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/669c147adf484c218eacd42506a05438, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:13,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/669c147adf484c218eacd42506a05438 is 175, key is test_row_0/A:col10/1731806951701/Put/seqid=0 2024-11-17T01:29:13,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742347_1523 (size=30955) 2024-11-17T01:29:13,951 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=115, memsize=38.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/669c147adf484c218eacd42506a05438 2024-11-17T01:29:13,957 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/5ad22ab46d1e49e798cd4f23ecb5565b is 50, key is test_row_0/B:col10/1731806951701/Put/seqid=0 2024-11-17T01:29:13,964 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742348_1524 (size=12001) 2024-11-17T01:29:13,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-17T01:29:14,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807014140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807014141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807014144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807014144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,145 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807014144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,174 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/c953c60bb5164f4f9e8ada2ae4248be7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/c953c60bb5164f4f9e8ada2ae4248be7 2024-11-17T01:29:14,178 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/C of 3f71bb4db1b15a8af73d9278bb8c8221 into c953c60bb5164f4f9e8ada2ae4248be7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:29:14,178 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:14,178 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/C, priority=13, startTime=1731806953334; duration=0sec 2024-11-17T01:29:14,178 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:14,178 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:C 2024-11-17T01:29:14,364 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/5ad22ab46d1e49e798cd4f23ecb5565b 2024-11-17T01:29:14,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/6e73940d1b4240f68cfecef1b72f4cde is 50, key is test_row_0/C:col10/1731806951701/Put/seqid=0 2024-11-17T01:29:14,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742349_1525 (size=12001) 2024-11-17T01:29:14,377 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/6e73940d1b4240f68cfecef1b72f4cde 2024-11-17T01:29:14,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/669c147adf484c218eacd42506a05438 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/669c147adf484c218eacd42506a05438 2024-11-17T01:29:14,384 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/669c147adf484c218eacd42506a05438, entries=150, sequenceid=115, filesize=30.2 K 2024-11-17T01:29:14,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/5ad22ab46d1e49e798cd4f23ecb5565b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5ad22ab46d1e49e798cd4f23ecb5565b 2024-11-17T01:29:14,389 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5ad22ab46d1e49e798cd4f23ecb5565b, entries=150, sequenceid=115, filesize=11.7 K 2024-11-17T01:29:14,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/6e73940d1b4240f68cfecef1b72f4cde as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/6e73940d1b4240f68cfecef1b72f4cde 2024-11-17T01:29:14,393 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/6e73940d1b4240f68cfecef1b72f4cde, entries=150, sequenceid=115, filesize=11.7 K 2024-11-17T01:29:14,394 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 3f71bb4db1b15a8af73d9278bb8c8221 in 862ms, sequenceid=115, compaction requested=false 2024-11-17T01:29:14,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:14,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:14,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-17T01:29:14,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-17T01:29:14,395 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-17T01:29:14,396 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0150 sec 2024-11-17T01:29:14,397 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 1.0170 sec 2024-11-17T01:29:14,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:14,445 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-17T01:29:14,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:14,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:14,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:14,446 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:14,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:14,447 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:14,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117ea9904623e1a49729cde2ca0aba76b51_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806954444/Put/seqid=0 2024-11-17T01:29:14,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742350_1526 (size=14794) 2024-11-17T01:29:14,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,465 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807014460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807014461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807014462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807014462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,466 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807014462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-17T01:29:14,482 INFO [Thread-2220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-17T01:29:14,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:29:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-17T01:29:14,484 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:29:14,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-17T01:29:14,484 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:29:14,485 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:29:14,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807014566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807014566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807014566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807014566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,568 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807014566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-17T01:29:14,635 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-17T01:29:14,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:14,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:14,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:14,636 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:14,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:14,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:14,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807014768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807014768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807014769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807014769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:14,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807014770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-17T01:29:14,788 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:14,788 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-17T01:29:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:14,788 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:14,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:14,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-17T01:29:14,859 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-17T01:29:14,861 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117ea9904623e1a49729cde2ca0aba76b51_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117ea9904623e1a49729cde2ca0aba76b51_3f71bb4db1b15a8af73d9278bb8c8221
2024-11-17T01:29:14,862 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/89ec4228e6df4d35aac0ca1d4482a035, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221]
2024-11-17T01:29:14,862 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/89ec4228e6df4d35aac0ca1d4482a035 is 175, key is test_row_0/A:col10/1731806954444/Put/seqid=0
2024-11-17T01:29:14,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742351_1527 (size=39749)
2024-11-17T01:29:14,940 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503
2024-11-17T01:29:14,940 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164
2024-11-17T01:29:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.
2024-11-17T01:29:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing
2024-11-17T01:29:14,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.
2024-11-17T01:29:14,940 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164
java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:14,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:14,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807015070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,072 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807015071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807015073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807015073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807015074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-17T01:29:15,092 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,092 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-17T01:29:15,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,092 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:15,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-17T01:29:15,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:15,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,265 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/89ec4228e6df4d35aac0ca1d4482a035 2024-11-17T01:29:15,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/15344626c1d8424e90bb5fb83275b3e8 is 50, key is test_row_0/B:col10/1731806954444/Put/seqid=0 2024-11-17T01:29:15,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742352_1528 (size=12151) 2024-11-17T01:29:15,396 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-17T01:29:15,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
as already flushing 2024-11-17T01:29:15,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,397 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,549 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,549 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-17T01:29:15,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:15,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,549 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:15,575 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807015573, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807015574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,576 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807015575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807015578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:15,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807015578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-17T01:29:15,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/15344626c1d8424e90bb5fb83275b3e8 2024-11-17T01:29:15,678 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/a35d475198b942c5920b138a7bd4447a is 50, key is test_row_0/C:col10/1731806954444/Put/seqid=0 2024-11-17T01:29:15,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742353_1529 (size=12151) 2024-11-17T01:29:15,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/a35d475198b942c5920b138a7bd4447a 2024-11-17T01:29:15,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/89ec4228e6df4d35aac0ca1d4482a035 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/89ec4228e6df4d35aac0ca1d4482a035 2024-11-17T01:29:15,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/89ec4228e6df4d35aac0ca1d4482a035, entries=200, sequenceid=137, filesize=38.8 K 2024-11-17T01:29:15,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/15344626c1d8424e90bb5fb83275b3e8 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/15344626c1d8424e90bb5fb83275b3e8 2024-11-17T01:29:15,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/15344626c1d8424e90bb5fb83275b3e8, entries=150, sequenceid=137, filesize=11.9 K 2024-11-17T01:29:15,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/a35d475198b942c5920b138a7bd4447a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/a35d475198b942c5920b138a7bd4447a 2024-11-17T01:29:15,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/a35d475198b942c5920b138a7bd4447a, entries=150, sequenceid=137, filesize=11.9 K 2024-11-17T01:29:15,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1249ms, sequenceid=137, compaction requested=true 2024-11-17T01:29:15,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:15,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:29:15,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:15,695 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:15,695 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:15,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:B, priority=-2147483648, current under 
compaction store size is 2 2024-11-17T01:29:15,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:15,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:29:15,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:15,696 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:15,696 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/B is initiating minor compaction (all files) 2024-11-17T01:29:15,696 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/B in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,696 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5817f111a15f4179b42763bb59aceae4, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5ad22ab46d1e49e798cd4f23ecb5565b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/15344626c1d8424e90bb5fb83275b3e8] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=35.5 K 2024-11-17T01:29:15,696 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:15,696 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/A is initiating minor compaction (all files) 2024-11-17T01:29:15,696 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/A in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:15,696 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 5817f111a15f4179b42763bb59aceae4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731806950575 2024-11-17T01:29:15,696 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/daf4b569cc504af6913c3406583989e1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/669c147adf484c218eacd42506a05438, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/89ec4228e6df4d35aac0ca1d4482a035] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=99.5 K 2024-11-17T01:29:15,696 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,696 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/daf4b569cc504af6913c3406583989e1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/669c147adf484c218eacd42506a05438, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/89ec4228e6df4d35aac0ca1d4482a035] 2024-11-17T01:29:15,696 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 5ad22ab46d1e49e798cd4f23ecb5565b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731806951699 2024-11-17T01:29:15,696 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting daf4b569cc504af6913c3406583989e1, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731806950575 2024-11-17T01:29:15,696 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 15344626c1d8424e90bb5fb83275b3e8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731806953836 2024-11-17T01:29:15,697 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 669c147adf484c218eacd42506a05438, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731806951699 2024-11-17T01:29:15,697 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89ec4228e6df4d35aac0ca1d4482a035, keycount=200, bloomtype=ROW, size=38.8 K, 
encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731806953832 2024-11-17T01:29:15,701 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:15,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-17T01:29:15,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:15,702 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-17T01:29:15,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:15,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:15,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:15,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:15,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:15,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:15,703 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#B#compaction#447 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:15,703 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/f28a408f5a494db0809833b4c541915b is 50, key is test_row_0/B:col10/1731806954444/Put/seqid=0 2024-11-17T01:29:15,704 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:15,716 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241117f29593aa7e9c41e786f2a09a4ea66f5b_3f71bb4db1b15a8af73d9278bb8c8221 store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:15,718 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241117f29593aa7e9c41e786f2a09a4ea66f5b_3f71bb4db1b15a8af73d9278bb8c8221, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:15,718 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f29593aa7e9c41e786f2a09a4ea66f5b_3f71bb4db1b15a8af73d9278bb8c8221 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:15,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411173ca97292768f4148af5d87c63b2120ca_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806954460/Put/seqid=0 2024-11-17T01:29:15,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742354_1530 (size=12459) 2024-11-17T01:29:15,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742355_1531 (size=4469) 2024-11-17T01:29:15,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742356_1532 (size=12304) 2024-11-17T01:29:15,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:15,744 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411173ca97292768f4148af5d87c63b2120ca_3f71bb4db1b15a8af73d9278bb8c8221 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411173ca97292768f4148af5d87c63b2120ca_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:15,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/465cac198c484749ad33f646573a87c7, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:15,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/465cac198c484749ad33f646573a87c7 is 175, key is test_row_0/A:col10/1731806954460/Put/seqid=0 2024-11-17T01:29:15,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742357_1533 (size=31105) 2024-11-17T01:29:15,748 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=155, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/465cac198c484749ad33f646573a87c7 2024-11-17T01:29:15,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/1afce19dce8c436abbe3be507de1034d is 50, key is test_row_0/B:col10/1731806954460/Put/seqid=0 2024-11-17T01:29:15,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742358_1534 (size=12151) 2024-11-17T01:29:16,123 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/f28a408f5a494db0809833b4c541915b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f28a408f5a494db0809833b4c541915b 2024-11-17T01:29:16,126 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/B of 3f71bb4db1b15a8af73d9278bb8c8221 into f28a408f5a494db0809833b4c541915b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:29:16,126 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:16,126 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/B, priority=13, startTime=1731806955695; duration=0sec 2024-11-17T01:29:16,126 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:16,126 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:B 2024-11-17T01:29:16,126 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:16,127 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:16,127 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/C is initiating minor compaction (all files) 2024-11-17T01:29:16,127 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/C in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:16,127 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/c953c60bb5164f4f9e8ada2ae4248be7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/6e73940d1b4240f68cfecef1b72f4cde, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/a35d475198b942c5920b138a7bd4447a] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=35.5 K 2024-11-17T01:29:16,127 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting c953c60bb5164f4f9e8ada2ae4248be7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731806950575 2024-11-17T01:29:16,128 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e73940d1b4240f68cfecef1b72f4cde, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731806951699 2024-11-17T01:29:16,130 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting a35d475198b942c5920b138a7bd4447a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731806953836 2024-11-17T01:29:16,135 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
3f71bb4db1b15a8af73d9278bb8c8221#C#compaction#451 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:16,135 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#A#compaction#448 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:16,135 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/baf19623be4447868df6e0826540e61b is 50, key is test_row_0/C:col10/1731806954444/Put/seqid=0 2024-11-17T01:29:16,135 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/3a7a0eda355b4e47a48c4803565d27c3 is 175, key is test_row_0/A:col10/1731806954444/Put/seqid=0 2024-11-17T01:29:16,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742359_1535 (size=31413) 2024-11-17T01:29:16,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742360_1536 (size=12459) 2024-11-17T01:29:16,156 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/1afce19dce8c436abbe3be507de1034d 2024-11-17T01:29:16,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/2a77d674bc044f059b1e61d14d83d6fa is 50, key is test_row_0/C:col10/1731806954460/Put/seqid=0 2024-11-17T01:29:16,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742361_1537 (size=12151) 2024-11-17T01:29:16,542 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/baf19623be4447868df6e0826540e61b as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/baf19623be4447868df6e0826540e61b 2024-11-17T01:29:16,542 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/3a7a0eda355b4e47a48c4803565d27c3 as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/3a7a0eda355b4e47a48c4803565d27c3 2024-11-17T01:29:16,544 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/C of 3f71bb4db1b15a8af73d9278bb8c8221 into baf19623be4447868df6e0826540e61b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:16,544 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:16,544 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/C, priority=13, startTime=1731806955695; duration=0sec 2024-11-17T01:29:16,545 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:16,545 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:C 2024-11-17T01:29:16,545 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/A of 3f71bb4db1b15a8af73d9278bb8c8221 into 3a7a0eda355b4e47a48c4803565d27c3(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:29:16,545 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:16,545 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/A, priority=13, startTime=1731806955695; duration=0sec 2024-11-17T01:29:16,545 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:16,545 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:A 2024-11-17T01:29:16,566 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/2a77d674bc044f059b1e61d14d83d6fa 2024-11-17T01:29:16,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/465cac198c484749ad33f646573a87c7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/465cac198c484749ad33f646573a87c7 2024-11-17T01:29:16,571 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/465cac198c484749ad33f646573a87c7, entries=150, sequenceid=155, filesize=30.4 K 2024-11-17T01:29:16,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/1afce19dce8c436abbe3be507de1034d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/1afce19dce8c436abbe3be507de1034d 2024-11-17T01:29:16,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,574 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/1afce19dce8c436abbe3be507de1034d, entries=150, sequenceid=155, filesize=11.9 K 2024-11-17T01:29:16,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/2a77d674bc044f059b1e61d14d83d6fa as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2a77d674bc044f059b1e61d14d83d6fa 2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-17T01:29:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-17T01:29:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-17T01:29:16,584 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2a77d674bc044f059b1e61d14d83d6fa, entries=150, sequenceid=155, filesize=11.9 K
2024-11-17T01:29:16,585 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=0 B/0 for 3f71bb4db1b15a8af73d9278bb8c8221 in 883ms, sequenceid=155, compaction requested=false
2024-11-17T01:29:16,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221:
2024-11-17T01:29:16,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.
2024-11-17T01:29:16,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164
2024-11-17T01:29:16,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=164
2024-11-17T01:29:16,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163
2024-11-17T01:29:16,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163
2024-11-17T01:29:16,588 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1010 sec
2024-11-17T01:29:16,595 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 2.1050 sec
2024-11-17T01:29:16,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221
2024-11-17T01:29:16,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-17T01:29:16,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A
2024-11-17T01:29:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:29:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B
2024-11-17T01:29:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-17T01:29:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C
2024-11-17T01:29:16,620 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117e423709eb4eb48b0a64a50099c556d44_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806956618/Put/seqid=0 2024-11-17T01:29:16,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,634 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742362_1538 (size=19774) 2024-11-17T01:29:16,638 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:16,640 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117e423709eb4eb48b0a64a50099c556d44_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117e423709eb4eb48b0a64a50099c556d44_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:16,641 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/84b5a88fa40d4e4889aca4611206b9ea, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:16,642 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/84b5a88fa40d4e4889aca4611206b9ea is 175, key is test_row_0/A:col10/1731806956618/Put/seqid=0 2024-11-17T01:29:16,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742363_1539 (size=57033) 2024-11-17T01:29:16,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807016649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807016650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,657 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807016651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,658 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807016652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,659 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807016656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,759 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807016757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807016758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807016758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807016758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,761 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807016760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807016961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807016961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807016961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,963 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807016961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:16,964 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:16,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807016963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,051 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/84b5a88fa40d4e4889aca4611206b9ea 2024-11-17T01:29:17,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/bb5bde7114664aa4a48e0f9e130ec51f is 50, key is test_row_0/B:col10/1731806956618/Put/seqid=0 2024-11-17T01:29:17,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742364_1540 (size=12151) 2024-11-17T01:29:17,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807017263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807017264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807017264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,267 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807017265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807017266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,459 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/bb5bde7114664aa4a48e0f9e130ec51f 2024-11-17T01:29:17,464 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/e14f6c3acd044f8f9370926b4aa88919 is 50, key is test_row_0/C:col10/1731806956618/Put/seqid=0 2024-11-17T01:29:17,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742365_1541 (size=12151) 2024-11-17T01:29:17,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807017765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,767 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807017765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,767 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807017766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807017768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:17,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807017768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:17,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/e14f6c3acd044f8f9370926b4aa88919 2024-11-17T01:29:17,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/84b5a88fa40d4e4889aca4611206b9ea as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/84b5a88fa40d4e4889aca4611206b9ea 2024-11-17T01:29:17,872 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/84b5a88fa40d4e4889aca4611206b9ea, entries=300, sequenceid=170, filesize=55.7 K 2024-11-17T01:29:17,873 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/bb5bde7114664aa4a48e0f9e130ec51f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/bb5bde7114664aa4a48e0f9e130ec51f 2024-11-17T01:29:17,875 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/bb5bde7114664aa4a48e0f9e130ec51f, entries=150, sequenceid=170, filesize=11.9 K 2024-11-17T01:29:17,875 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/e14f6c3acd044f8f9370926b4aa88919 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/e14f6c3acd044f8f9370926b4aa88919 2024-11-17T01:29:17,877 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/e14f6c3acd044f8f9370926b4aa88919, entries=150, sequenceid=170, filesize=11.9 K 2024-11-17T01:29:17,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1259ms, sequenceid=170, compaction requested=true 2024-11-17T01:29:17,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:17,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:29:17,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:17,878 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:17,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:29:17,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:17,878 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:17,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:29:17,878 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:17,879 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 119551 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:17,879 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 
3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:17,879 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/A is initiating minor compaction (all files) 2024-11-17T01:29:17,879 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/B is initiating minor compaction (all files) 2024-11-17T01:29:17,879 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/A in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:17,879 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/B in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:17,879 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f28a408f5a494db0809833b4c541915b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/1afce19dce8c436abbe3be507de1034d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/bb5bde7114664aa4a48e0f9e130ec51f] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=35.9 K 2024-11-17T01:29:17,879 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/3a7a0eda355b4e47a48c4803565d27c3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/465cac198c484749ad33f646573a87c7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/84b5a88fa40d4e4889aca4611206b9ea] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=116.7 K 2024-11-17T01:29:17,879 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:17,879 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/3a7a0eda355b4e47a48c4803565d27c3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/465cac198c484749ad33f646573a87c7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/84b5a88fa40d4e4889aca4611206b9ea] 2024-11-17T01:29:17,879 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f28a408f5a494db0809833b4c541915b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731806953836 2024-11-17T01:29:17,879 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3a7a0eda355b4e47a48c4803565d27c3, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731806953836 2024-11-17T01:29:17,879 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 1afce19dce8c436abbe3be507de1034d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731806954454 2024-11-17T01:29:17,880 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 465cac198c484749ad33f646573a87c7, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731806954454 2024-11-17T01:29:17,880 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting bb5bde7114664aa4a48e0f9e130ec51f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731806956617 2024-11-17T01:29:17,880 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84b5a88fa40d4e4889aca4611206b9ea, keycount=300, bloomtype=ROW, size=55.7 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731806956596 2024-11-17T01:29:17,884 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:17,885 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#B#compaction#456 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:17,885 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111729647bc10c16477e9360e1103fc971fe_3f71bb4db1b15a8af73d9278bb8c8221 store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:17,885 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/e250530aeebd4784b3ab465f48345b66 is 50, key is test_row_0/B:col10/1731806956618/Put/seqid=0 2024-11-17T01:29:17,887 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111729647bc10c16477e9360e1103fc971fe_3f71bb4db1b15a8af73d9278bb8c8221, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:17,887 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111729647bc10c16477e9360e1103fc971fe_3f71bb4db1b15a8af73d9278bb8c8221 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:17,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742366_1542 (size=12561) 2024-11-17T01:29:17,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742367_1543 (size=4469) 2024-11-17T01:29:17,890 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#A#compaction#457 average throughput is 4.07 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:17,891 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/ee7708287aa942669cf855d060f7fac7 is 175, key is test_row_0/A:col10/1731806956618/Put/seqid=0 2024-11-17T01:29:17,892 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/e250530aeebd4784b3ab465f48345b66 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e250530aeebd4784b3ab465f48345b66 2024-11-17T01:29:17,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742368_1544 (size=31515) 2024-11-17T01:29:17,895 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/B of 3f71bb4db1b15a8af73d9278bb8c8221 into e250530aeebd4784b3ab465f48345b66(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:17,895 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:17,895 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/B, priority=13, startTime=1731806957878; duration=0sec 2024-11-17T01:29:17,895 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:17,895 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:B 2024-11-17T01:29:17,895 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:17,896 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36761 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:17,896 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/C is initiating minor compaction (all files) 2024-11-17T01:29:17,896 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/C in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:17,896 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/baf19623be4447868df6e0826540e61b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2a77d674bc044f059b1e61d14d83d6fa, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/e14f6c3acd044f8f9370926b4aa88919] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=35.9 K 2024-11-17T01:29:17,896 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting baf19623be4447868df6e0826540e61b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731806953836 2024-11-17T01:29:17,897 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 2a77d674bc044f059b1e61d14d83d6fa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731806954454 2024-11-17T01:29:17,897 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e14f6c3acd044f8f9370926b4aa88919, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731806956617 2024-11-17T01:29:17,897 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/ee7708287aa942669cf855d060f7fac7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/ee7708287aa942669cf855d060f7fac7 2024-11-17T01:29:17,901 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/A of 3f71bb4db1b15a8af73d9278bb8c8221 into ee7708287aa942669cf855d060f7fac7(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:29:17,901 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:17,901 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/A, priority=13, startTime=1731806957878; duration=0sec 2024-11-17T01:29:17,901 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:17,901 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:A 2024-11-17T01:29:17,906 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#C#compaction#458 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:17,906 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/306d3feadf3d43afadd075ad0eb263cb is 50, key is test_row_0/C:col10/1731806956618/Put/seqid=0 2024-11-17T01:29:17,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742369_1545 (size=12561) 2024-11-17T01:29:17,912 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/306d3feadf3d43afadd075ad0eb263cb as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/306d3feadf3d43afadd075ad0eb263cb 2024-11-17T01:29:17,915 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/C of 3f71bb4db1b15a8af73d9278bb8c8221 into 306d3feadf3d43afadd075ad0eb263cb(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:29:17,915 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:17,915 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/C, priority=13, startTime=1731806957878; duration=0sec 2024-11-17T01:29:17,915 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:17,915 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:C 2024-11-17T01:29:18,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-17T01:29:18,588 INFO [Thread-2220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-17T01:29:18,589 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:29:18,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-17T01:29:18,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-17T01:29:18,590 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:29:18,590 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:29:18,590 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:29:18,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-17T01:29:18,741 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:18,742 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-17T01:29:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:18,742 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-17T01:29:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:18,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:18,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117c69db0b11961417ca2150e56b88cc075_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806956651/Put/seqid=0 2024-11-17T01:29:18,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742370_1546 (size=12304) 2024-11-17T01:29:18,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:18,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:18,809 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:18,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807018777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:18,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:18,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807018809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:18,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807018809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:18,813 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807018809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:18,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:18,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807018810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:18,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-17T01:29:18,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807018913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:18,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807018913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:18,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:18,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807018913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:18,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:18,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807018914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807019116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807019117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,118 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807019117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807019117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:19,153 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117c69db0b11961417ca2150e56b88cc075_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117c69db0b11961417ca2150e56b88cc075_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:19,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/931a437f9154422eb9ee0d3c8cf3cfbb, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:19,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/931a437f9154422eb9ee0d3c8cf3cfbb is 175, key is test_row_0/A:col10/1731806956651/Put/seqid=0 2024-11-17T01:29:19,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742371_1547 (size=31105) 2024-11-17T01:29:19,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-17T01:29:19,421 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807019419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807019419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807019420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,422 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807019420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,557 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=197, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/931a437f9154422eb9ee0d3c8cf3cfbb 2024-11-17T01:29:19,562 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/7f621d3c6bd14d8c881312a0c5ea0afa is 50, key is test_row_0/B:col10/1731806956651/Put/seqid=0 2024-11-17T01:29:19,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742372_1548 (size=12151) 2024-11-17T01:29:19,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-17T01:29:19,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807019923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807019923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807019923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,926 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:19,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807019924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:19,965 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/7f621d3c6bd14d8c881312a0c5ea0afa 2024-11-17T01:29:19,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/3b26f5b6b8d640b2b4875a781c7a1b75 is 50, key is test_row_0/C:col10/1731806956651/Put/seqid=0 2024-11-17T01:29:19,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742373_1549 (size=12151) 2024-11-17T01:29:20,374 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/3b26f5b6b8d640b2b4875a781c7a1b75 2024-11-17T01:29:20,377 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/931a437f9154422eb9ee0d3c8cf3cfbb as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/931a437f9154422eb9ee0d3c8cf3cfbb 2024-11-17T01:29:20,379 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/931a437f9154422eb9ee0d3c8cf3cfbb, entries=150, sequenceid=197, filesize=30.4 K 2024-11-17T01:29:20,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/7f621d3c6bd14d8c881312a0c5ea0afa as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/7f621d3c6bd14d8c881312a0c5ea0afa 2024-11-17T01:29:20,382 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/7f621d3c6bd14d8c881312a0c5ea0afa, entries=150, sequenceid=197, filesize=11.9 K 2024-11-17T01:29:20,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/3b26f5b6b8d640b2b4875a781c7a1b75 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/3b26f5b6b8d640b2b4875a781c7a1b75 2024-11-17T01:29:20,385 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/3b26f5b6b8d640b2b4875a781c7a1b75, entries=150, sequenceid=197, filesize=11.9 K 2024-11-17T01:29:20,385 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1643ms, sequenceid=197, compaction requested=false 2024-11-17T01:29:20,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:20,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:20,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-17T01:29:20,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-17T01:29:20,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-17T01:29:20,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7960 sec 2024-11-17T01:29:20,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.7990 sec 2024-11-17T01:29:20,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-17T01:29:20,693 INFO [Thread-2220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-17T01:29:20,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:29:20,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-17T01:29:20,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-17T01:29:20,696 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:29:20,696 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:29:20,696 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:29:20,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-17T01:29:20,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:20,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-17T01:29:20,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:20,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:20,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:20,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:20,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:20,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:20,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f81da28156dc4fd3ab1594c01dcdc8b8_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806958809/Put/seqid=0 2024-11-17T01:29:20,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742374_1550 (size=12304) 2024-11-17T01:29:20,847 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:20,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:20,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:20,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:20,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:20,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:20,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:20,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:20,870 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:20,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807020867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:20,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:20,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807020927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:20,930 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:20,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807020929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:20,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:20,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807020929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:20,932 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:20,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807020931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:20,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:20,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807020971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:20,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-17T01:29:20,999 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:21,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:21,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:21,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,000 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,152 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:21,152 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:21,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:21,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,152 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:21,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807021179, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:21,220 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:21,223 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117f81da28156dc4fd3ab1594c01dcdc8b8_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f81da28156dc4fd3ab1594c01dcdc8b8_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:21,223 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/10a4d2ff997e4da1a967a24e95309349, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:21,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/10a4d2ff997e4da1a967a24e95309349 is 175, key is test_row_0/A:col10/1731806958809/Put/seqid=0 2024-11-17T01:29:21,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742375_1551 (size=31105) 2024-11-17T01:29:21,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-17T01:29:21,304 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:21,304 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:21,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:21,304 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:21,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,305 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,305 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:21,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,456 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:21,456 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:21,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
as already flushing 2024-11-17T01:29:21,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,457 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,457 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,485 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:21,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807021484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:21,608 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:21,609 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:21,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:21,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,609 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-17T01:29:21,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,627 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=211, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/10a4d2ff997e4da1a967a24e95309349 2024-11-17T01:29:21,632 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/f14f10a666db4fd5a7377ed5c584a39f is 50, key is test_row_0/B:col10/1731806958809/Put/seqid=0 2024-11-17T01:29:21,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742376_1552 (size=12151) 2024-11-17T01:29:21,760 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:21,761 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:21,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:21,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,761 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
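The repeated pid=168 entries in this stretch all follow the same pattern: the master dispatches FlushRegionCallable to the region server, HRegion reports "NOT flushing ... as already flushing", the callable fails with "Unable to complete flush", and the master logs "Remote procedure failed" and re-dispatches it a short while later. A rough sketch of that loop, using invented names purely for illustration (this is not HBase source):

```java
// Illustrative sketch only (not HBase source): why the same procedure id keeps
// failing with "Unable to complete flush" while the region is already flushing,
// and why the master simply tries again.
import java.io.IOException;

final class FlushRedispatchSketch {

    interface Region {
        boolean isFlushing();
        void flush() throws IOException;
        String name();
    }

    // Region-server side, standing in for FlushRegionCallable: refuse rather
    // than queue a second concurrent flush of the same region.
    static void tryFlush(Region region) throws IOException {
        if (region.isFlushing()) {
            throw new IOException("Unable to complete flush " + region.name());
        }
        region.flush();
    }

    // Master side, standing in for the procedure re-dispatch: absorb the remote
    // failure and retry after a short delay until the callable finally runs.
    static void runFlushProcedure(Region region, long retryDelayMillis) throws InterruptedException {
        while (true) {
            try {
                tryFlush(region);
                return;                                   // procedure would be marked done here
            } catch (IOException remoteFailure) {
                // corresponds to "Remote procedure failed, pid=168" followed by
                // another "Executing remote procedure ... FlushRegionCallable"
                Thread.sleep(retryDelayMillis);
            }
        }
    }
}
```

In this run the refusals keep recurring until the in-flight MemStoreFlusher.0 flush completes (the "Finished flush ... in 1642ms" record further down).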
2024-11-17T01:29:21,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-17T01:29:21,913 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:21,913 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:21,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:21,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:21,914 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,914 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:21,990 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:21,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807021988, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:22,035 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/f14f10a666db4fd5a7377ed5c584a39f 2024-11-17T01:29:22,040 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/251f1ad348c54b1f878302c467e12801 is 50, key is test_row_0/C:col10/1731806958809/Put/seqid=0 2024-11-17T01:29:22,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742377_1553 (size=12151) 2024-11-17T01:29:22,065 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:22,065 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:22,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:22,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
as already flushing 2024-11-17T01:29:22,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:22,066 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:22,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:22,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:22,217 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:22,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:22,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:22,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:22,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:22,218 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:22,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:22,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:22,369 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:22,370 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:22,370 ERROR [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:22,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:22,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-17T01:29:22,443 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/251f1ad348c54b1f878302c467e12801 2024-11-17T01:29:22,445 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/10a4d2ff997e4da1a967a24e95309349 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/10a4d2ff997e4da1a967a24e95309349 2024-11-17T01:29:22,448 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/10a4d2ff997e4da1a967a24e95309349, entries=150, sequenceid=211, filesize=30.4 K 2024-11-17T01:29:22,448 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/f14f10a666db4fd5a7377ed5c584a39f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f14f10a666db4fd5a7377ed5c584a39f 2024-11-17T01:29:22,451 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f14f10a666db4fd5a7377ed5c584a39f, entries=150, 
sequenceid=211, filesize=11.9 K 2024-11-17T01:29:22,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/251f1ad348c54b1f878302c467e12801 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/251f1ad348c54b1f878302c467e12801 2024-11-17T01:29:22,454 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/251f1ad348c54b1f878302c467e12801, entries=150, sequenceid=211, filesize=11.9 K 2024-11-17T01:29:22,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1642ms, sequenceid=211, compaction requested=true 2024-11-17T01:29:22,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:22,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:29:22,454 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:22,455 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:22,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:29:22,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:22,455 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:22,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:29:22,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:22,455 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93725 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:22,455 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:22,455 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] 
regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/A is initiating minor compaction (all files) 2024-11-17T01:29:22,455 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/B is initiating minor compaction (all files) 2024-11-17T01:29:22,455 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/B in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:22,455 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/A in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:22,455 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/ee7708287aa942669cf855d060f7fac7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/931a437f9154422eb9ee0d3c8cf3cfbb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/10a4d2ff997e4da1a967a24e95309349] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=91.5 K 2024-11-17T01:29:22,455 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e250530aeebd4784b3ab465f48345b66, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/7f621d3c6bd14d8c881312a0c5ea0afa, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f14f10a666db4fd5a7377ed5c584a39f] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=36.0 K 2024-11-17T01:29:22,455 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:22,456 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/ee7708287aa942669cf855d060f7fac7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/931a437f9154422eb9ee0d3c8cf3cfbb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/10a4d2ff997e4da1a967a24e95309349] 2024-11-17T01:29:22,456 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e250530aeebd4784b3ab465f48345b66, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731806956617 2024-11-17T01:29:22,456 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting ee7708287aa942669cf855d060f7fac7, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731806956617 2024-11-17T01:29:22,456 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f621d3c6bd14d8c881312a0c5ea0afa, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731806956648 2024-11-17T01:29:22,456 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting f14f10a666db4fd5a7377ed5c584a39f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731806958781 2024-11-17T01:29:22,456 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 931a437f9154422eb9ee0d3c8cf3cfbb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731806956648 2024-11-17T01:29:22,456 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10a4d2ff997e4da1a967a24e95309349, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731806958781 2024-11-17T01:29:22,461 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#B#compaction#465 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:22,461 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/0c5be38ab4414dd18ed2bddc420de1d3 is 50, key is test_row_0/B:col10/1731806958809/Put/seqid=0 2024-11-17T01:29:22,462 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:22,475 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241117e6032f92276e452b82d2621055d356c1_3f71bb4db1b15a8af73d9278bb8c8221 store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:22,477 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241117e6032f92276e452b82d2621055d356c1_3f71bb4db1b15a8af73d9278bb8c8221, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:22,477 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117e6032f92276e452b82d2621055d356c1_3f71bb4db1b15a8af73d9278bb8c8221 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:22,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742378_1554 (size=12663) 2024-11-17T01:29:22,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742379_1555 (size=4469) 2024-11-17T01:29:22,481 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#A#compaction#466 average throughput is 1.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:22,481 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/de5722378100410e86217ca00e2c3fd9 is 175, key is test_row_0/A:col10/1731806958809/Put/seqid=0 2024-11-17T01:29:22,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742380_1556 (size=31617) 2024-11-17T01:29:22,488 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/de5722378100410e86217ca00e2c3fd9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/de5722378100410e86217ca00e2c3fd9 2024-11-17T01:29:22,491 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/A of 3f71bb4db1b15a8af73d9278bb8c8221 into de5722378100410e86217ca00e2c3fd9(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:22,491 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:22,491 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/A, priority=13, startTime=1731806962454; duration=0sec 2024-11-17T01:29:22,491 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:22,491 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:A 2024-11-17T01:29:22,491 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:22,492 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:22,492 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/C is initiating minor compaction (all files) 2024-11-17T01:29:22,492 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/C in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:22,492 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/306d3feadf3d43afadd075ad0eb263cb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/3b26f5b6b8d640b2b4875a781c7a1b75, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/251f1ad348c54b1f878302c467e12801] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=36.0 K 2024-11-17T01:29:22,492 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 306d3feadf3d43afadd075ad0eb263cb, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731806956617 2024-11-17T01:29:22,493 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3b26f5b6b8d640b2b4875a781c7a1b75, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731806956648 2024-11-17T01:29:22,493 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting 251f1ad348c54b1f878302c467e12801, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731806958781 2024-11-17T01:29:22,498 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#C#compaction#467 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:22,499 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/58120f85fd4541608d3f097d4d5f2d32 is 50, key is test_row_0/C:col10/1731806958809/Put/seqid=0 2024-11-17T01:29:22,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742381_1557 (size=12663) 2024-11-17T01:29:22,522 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:22,522 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-17T01:29:22,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:22,522 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-17T01:29:22,522 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:22,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:22,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:22,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:22,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:22,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:22,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117dd2626ebedb54897991562b27b06ada7_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806960848/Put/seqid=0 2024-11-17T01:29:22,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742382_1558 (size=12304) 2024-11-17T01:29:22,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-17T01:29:22,882 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/0c5be38ab4414dd18ed2bddc420de1d3 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/0c5be38ab4414dd18ed2bddc420de1d3 2024-11-17T01:29:22,886 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/B of 3f71bb4db1b15a8af73d9278bb8c8221 into 0c5be38ab4414dd18ed2bddc420de1d3(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:29:22,886 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:22,886 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/B, priority=13, startTime=1731806962455; duration=0sec 2024-11-17T01:29:22,886 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:22,886 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:B 2024-11-17T01:29:22,905 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/58120f85fd4541608d3f097d4d5f2d32 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/58120f85fd4541608d3f097d4d5f2d32 2024-11-17T01:29:22,908 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/C of 3f71bb4db1b15a8af73d9278bb8c8221 into 58120f85fd4541608d3f097d4d5f2d32(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:22,908 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:22,908 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/C, priority=13, startTime=1731806962455; duration=0sec 2024-11-17T01:29:22,908 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:22,908 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:C 2024-11-17T01:29:22,930 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:22,933 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241117dd2626ebedb54897991562b27b06ada7_3f71bb4db1b15a8af73d9278bb8c8221 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117dd2626ebedb54897991562b27b06ada7_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:22,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/b534ae7d127e476d9ce0464bc678f004, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:22,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/b534ae7d127e476d9ce0464bc678f004 is 175, key is test_row_0/A:col10/1731806960848/Put/seqid=0 2024-11-17T01:29:22,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742383_1559 (size=31105) 2024-11-17T01:29:22,937 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=234, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/b534ae7d127e476d9ce0464bc678f004 2024-11-17T01:29:22,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:22,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
as already flushing 2024-11-17T01:29:22,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/e7b22f338f3849fcb40c66c29d2e1567 is 50, key is test_row_0/B:col10/1731806960848/Put/seqid=0 2024-11-17T01:29:22,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742384_1560 (size=12151) 2024-11-17T01:29:22,945 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/e7b22f338f3849fcb40c66c29d2e1567 2024-11-17T01:29:22,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/34a6661b51e4415496ad8c8030b041ad is 50, key is test_row_0/C:col10/1731806960848/Put/seqid=0 2024-11-17T01:29:22,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:22,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47496 deadline: 1731807022948, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:22,951 DEBUG [Thread-2216 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4142 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., hostname=04f7e7347dc7,37721,1731806791503, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-17T01:29:22,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:22,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807022950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:22,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:22,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807022951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:22,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:22,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807022951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:22,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742385_1561 (size=12151) 2024-11-17T01:29:22,997 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:22,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807022997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,054 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807023053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807023053, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,055 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807023054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807023256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807023256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807023256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,354 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=234 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/34a6661b51e4415496ad8c8030b041ad 2024-11-17T01:29:23,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/b534ae7d127e476d9ce0464bc678f004 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/b534ae7d127e476d9ce0464bc678f004 2024-11-17T01:29:23,359 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/b534ae7d127e476d9ce0464bc678f004, entries=150, sequenceid=234, filesize=30.4 K 2024-11-17T01:29:23,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/e7b22f338f3849fcb40c66c29d2e1567 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e7b22f338f3849fcb40c66c29d2e1567 2024-11-17T01:29:23,362 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e7b22f338f3849fcb40c66c29d2e1567, entries=150, sequenceid=234, filesize=11.9 K 2024-11-17T01:29:23,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/34a6661b51e4415496ad8c8030b041ad as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/34a6661b51e4415496ad8c8030b041ad 2024-11-17T01:29:23,364 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/34a6661b51e4415496ad8c8030b041ad, entries=150, sequenceid=234, filesize=11.9 K 2024-11-17T01:29:23,367 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 3f71bb4db1b15a8af73d9278bb8c8221 in 845ms, sequenceid=234, compaction requested=false 2024-11-17T01:29:23,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:23,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:23,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-17T01:29:23,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-17T01:29:23,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-17T01:29:23,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6720 sec 2024-11-17T01:29:23,369 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 2.6750 sec 2024-11-17T01:29:23,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:23,561 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-17T01:29:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:23,562 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:23,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111715a9807cf34a46bab01926a2c208c16e_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806962949/Put/seqid=0 2024-11-17T01:29:23,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742386_1562 (size=14794) 2024-11-17T01:29:23,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807023576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807023578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807023578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807023679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807023681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,683 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807023681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807023883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,885 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807023883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:23,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807023884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:23,972 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:23,974 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111715a9807cf34a46bab01926a2c208c16e_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111715a9807cf34a46bab01926a2c208c16e_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:23,975 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/feee9abdd3e74156900c6eb46d1e498d, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:23,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/feee9abdd3e74156900c6eb46d1e498d is 175, key is test_row_0/A:col10/1731806962949/Put/seqid=0 2024-11-17T01:29:23,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742387_1563 (size=39749) 2024-11-17T01:29:24,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:24,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807024186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:24,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:24,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807024187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:24,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:24,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807024187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:24,379 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=31.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/feee9abdd3e74156900c6eb46d1e498d 2024-11-17T01:29:24,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/99a8d902a4b24261ad9f1a5f29903c95 is 50, key is test_row_0/B:col10/1731806962949/Put/seqid=0 2024-11-17T01:29:24,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742388_1564 (size=12151) 2024-11-17T01:29:24,387 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/99a8d902a4b24261ad9f1a5f29903c95 2024-11-17T01:29:24,393 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/bc0c0309f4bf45c197d100aa1cd10fb6 is 50, key is test_row_0/C:col10/1731806962949/Put/seqid=0 2024-11-17T01:29:24,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742389_1565 (size=12151) 2024-11-17T01:29:24,693 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:24,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807024692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:24,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:24,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807024692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:24,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:24,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807024694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:24,796 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/bc0c0309f4bf45c197d100aa1cd10fb6 2024-11-17T01:29:24,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/feee9abdd3e74156900c6eb46d1e498d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/feee9abdd3e74156900c6eb46d1e498d 2024-11-17T01:29:24,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-17T01:29:24,799 INFO [Thread-2220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-17T01:29:24,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-17T01:29:24,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-17T01:29:24,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-17T01:29:24,801 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-17T01:29:24,802 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-17T01:29:24,802 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-17T01:29:24,804 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/feee9abdd3e74156900c6eb46d1e498d, entries=200, sequenceid=253, filesize=38.8 K 2024-11-17T01:29:24,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/99a8d902a4b24261ad9f1a5f29903c95 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/99a8d902a4b24261ad9f1a5f29903c95 2024-11-17T01:29:24,808 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/99a8d902a4b24261ad9f1a5f29903c95, entries=150, sequenceid=253, filesize=11.9 K 2024-11-17T01:29:24,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/bc0c0309f4bf45c197d100aa1cd10fb6 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/bc0c0309f4bf45c197d100aa1cd10fb6 2024-11-17T01:29:24,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/bc0c0309f4bf45c197d100aa1cd10fb6, entries=150, sequenceid=253, filesize=11.9 K 2024-11-17T01:29:24,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1250ms, sequenceid=253, compaction requested=true 2024-11-17T01:29:24,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:24,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:29:24,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:24,812 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:24,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:29:24,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:24,812 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-17T01:29:24,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:29:24,812 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:24,812 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:24,812 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:24,813 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/A is initiating minor compaction (all files) 2024-11-17T01:29:24,813 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/B is initiating minor compaction (all files) 2024-11-17T01:29:24,813 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/B in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:24,813 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/A in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:24,813 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/de5722378100410e86217ca00e2c3fd9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/b534ae7d127e476d9ce0464bc678f004, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/feee9abdd3e74156900c6eb46d1e498d] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=100.1 K 2024-11-17T01:29:24,813 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/0c5be38ab4414dd18ed2bddc420de1d3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e7b22f338f3849fcb40c66c29d2e1567, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/99a8d902a4b24261ad9f1a5f29903c95] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=36.1 K 2024-11-17T01:29:24,813 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:24,813 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
files: [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/de5722378100410e86217ca00e2c3fd9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/b534ae7d127e476d9ce0464bc678f004, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/feee9abdd3e74156900c6eb46d1e498d] 2024-11-17T01:29:24,813 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c5be38ab4414dd18ed2bddc420de1d3, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731806958781 2024-11-17T01:29:24,813 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting de5722378100410e86217ca00e2c3fd9, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731806958781 2024-11-17T01:29:24,813 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting e7b22f338f3849fcb40c66c29d2e1567, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731806960848 2024-11-17T01:29:24,813 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting b534ae7d127e476d9ce0464bc678f004, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731806960848 2024-11-17T01:29:24,813 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 99a8d902a4b24261ad9f1a5f29903c95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1731806962949 2024-11-17T01:29:24,813 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] compactions.Compactor(224): Compacting feee9abdd3e74156900c6eb46d1e498d, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1731806962949 2024-11-17T01:29:24,817 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:24,819 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411170cbb286f9b124af2bbd405d424c80a4b_3f71bb4db1b15a8af73d9278bb8c8221 store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:24,819 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#B#compaction#475 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:24,819 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/a56cd65aded9463abe141226b4f0f041 is 50, key is test_row_0/B:col10/1731806962949/Put/seqid=0 2024-11-17T01:29:24,820 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411170cbb286f9b124af2bbd405d424c80a4b_3f71bb4db1b15a8af73d9278bb8c8221, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:24,820 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411170cbb286f9b124af2bbd405d424c80a4b_3f71bb4db1b15a8af73d9278bb8c8221 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:24,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742390_1566 (size=12765) 2024-11-17T01:29:24,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742391_1567 (size=4469) 2024-11-17T01:29:24,824 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#A#compaction#474 average throughput is 3.49 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:24,825 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/6454ba61365d438392e56d578f4aa366 is 175, key is test_row_0/A:col10/1731806962949/Put/seqid=0 2024-11-17T01:29:24,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742392_1568 (size=31719) 2024-11-17T01:29:24,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-17T01:29:24,954 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:24,954 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=37721 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-17T01:29:24,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
2024-11-17T01:29:24,955 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-17T01:29:24,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:24,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:24,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:24,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:24,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:24,955 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:24,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411178845059692114f3ab53c367cba7ca496_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806963577/Put/seqid=0 2024-11-17T01:29:24,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742393_1569 (size=12454) 2024-11-17T01:29:24,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:24,969 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411178845059692114f3ab53c367cba7ca496_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411178845059692114f3ab53c367cba7ca496_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:24,969 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/eaed112e6a9f4007962727bbc230b8e9, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:24,970 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/eaed112e6a9f4007962727bbc230b8e9 is 175, key is test_row_0/A:col10/1731806963577/Put/seqid=0 2024-11-17T01:29:24,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742394_1570 (size=31255) 2024-11-17T01:29:25,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. as already flushing 2024-11-17T01:29:25,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:25,043 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:25,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807025041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:25,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-17T01:29:25,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:25,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807025144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:25,176 DEBUG [Thread-2227 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3a3b66d3 to 127.0.0.1:63898 2024-11-17T01:29:25,176 DEBUG [Thread-2227 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:25,176 DEBUG [Thread-2225 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x150e08ed to 127.0.0.1:63898 2024-11-17T01:29:25,176 DEBUG [Thread-2225 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:25,176 DEBUG [Thread-2221 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b914bf4 to 127.0.0.1:63898 2024-11-17T01:29:25,176 DEBUG [Thread-2221 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:25,178 DEBUG [Thread-2229 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5cfdf76c to 127.0.0.1:63898 2024-11-17T01:29:25,178 DEBUG [Thread-2229 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:25,179 DEBUG [Thread-2223 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3f6a59e4 to 127.0.0.1:63898 2024-11-17T01:29:25,179 DEBUG [Thread-2223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:25,228 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/a56cd65aded9463abe141226b4f0f041 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/a56cd65aded9463abe141226b4f0f041 2024-11-17T01:29:25,233 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/6454ba61365d438392e56d578f4aa366 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/6454ba61365d438392e56d578f4aa366 2024-11-17T01:29:25,234 INFO 
[RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/B of 3f71bb4db1b15a8af73d9278bb8c8221 into a56cd65aded9463abe141226b4f0f041(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:25,234 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:25,234 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/B, priority=13, startTime=1731806964812; duration=0sec 2024-11-17T01:29:25,234 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:25,234 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:B 2024-11-17T01:29:25,234 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-17T01:29:25,235 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-17T01:29:25,235 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1540): 3f71bb4db1b15a8af73d9278bb8c8221/C is initiating minor compaction (all files) 2024-11-17T01:29:25,235 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 3f71bb4db1b15a8af73d9278bb8c8221/C in TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
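The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entry above comes from HBase's size-ratio compaction selection, which only groups store files whose sizes stay within a configured ratio of one another. The Java sketch below illustrates that general ratio check under assumed inputs (the file sizes are taken loosely from the log; the 1.2 ratio is a typical default); it is a simplified illustration of the heuristic, not the ExploringCompactionPolicy source.

```java
import java.util.List;

public class RatioCompactionSketch {
    /** A candidate set is "in ratio" if no file is larger than ratio * (sum of the other files). */
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Three files of roughly equal size (about 12.4 K, 11.9 K, 11.9 K, as in the log)
        // pass the check, so all three are selected for the minor compaction.
        System.out.println(filesInRatio(List.of(12_700L, 12_200L, 12_200L), 1.2)); // true
        // One oversized file next to two small ones would fail the check and be left out.
        System.out.println(filesInRatio(List.of(1_000_000L, 12_200L, 12_200L), 1.2)); // false
    }
}
```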
2024-11-17T01:29:25,235 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/58120f85fd4541608d3f097d4d5f2d32, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/34a6661b51e4415496ad8c8030b041ad, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/bc0c0309f4bf45c197d100aa1cd10fb6] into tmpdir=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp, totalSize=36.1 K 2024-11-17T01:29:25,235 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 58120f85fd4541608d3f097d4d5f2d32, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1731806958781 2024-11-17T01:29:25,236 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting 34a6661b51e4415496ad8c8030b041ad, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1731806960848 2024-11-17T01:29:25,236 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] compactions.Compactor(224): Compacting bc0c0309f4bf45c197d100aa1cd10fb6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1731806962949 2024-11-17T01:29:25,237 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/A of 3f71bb4db1b15a8af73d9278bb8c8221 into 6454ba61365d438392e56d578f4aa366(size=31.0 K), total size for store is 31.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-17T01:29:25,237 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:25,237 INFO [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/A, priority=13, startTime=1731806964812; duration=0sec 2024-11-17T01:29:25,237 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:25,237 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:A 2024-11-17T01:29:25,241 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3f71bb4db1b15a8af73d9278bb8c8221#C#compaction#477 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-17T01:29:25,241 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/196bdf2924204955b6e3378740d0bb1a is 50, key is test_row_0/C:col10/1731806962949/Put/seqid=0 2024-11-17T01:29:25,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742395_1571 (size=12765) 2024-11-17T01:29:25,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:25,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807025347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:25,374 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=272, memsize=35.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/eaed112e6a9f4007962727bbc230b8e9 2024-11-17T01:29:25,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/2711a853ce2b48118effbcd10bbdae4d is 50, key is test_row_0/B:col10/1731806963577/Put/seqid=0 2024-11-17T01:29:25,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742396_1572 (size=12301) 
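The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") show the region server rejecting Mutate calls while the region's memstore is over its blocking limit and the requested flush is still in flight; callers are expected to back off and retry until the flush drains the memstore. Below is a hypothetical client-side sketch of such a bounded retry around Table.put; the table, row, column, and backoff values are illustrative only, and in practice the HBase client already retries this exception internally, so explicit handling like this is rarely needed.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);      // write accepted once the memstore is below the limit
                    break;
                } catch (RegionTooBusyException e) {
                    if (attempt >= 5) {  // give up after a few attempts
                        throw e;
                    }
                    Thread.sleep(backoffMs);  // let the in-flight flush make progress
                    backoffMs *= 2;
                }
            }
        }
    }
}
```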
2024-11-17T01:29:25,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-17T01:29:25,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:25,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807025652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:25,656 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/196bdf2924204955b6e3378740d0bb1a as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/196bdf2924204955b6e3378740d0bb1a 2024-11-17T01:29:25,662 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 3f71bb4db1b15a8af73d9278bb8c8221/C of 3f71bb4db1b15a8af73d9278bb8c8221 into 196bdf2924204955b6e3378740d0bb1a(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-17T01:29:25,662 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:25,662 INFO [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221., storeName=3f71bb4db1b15a8af73d9278bb8c8221/C, priority=13, startTime=1731806964812; duration=0sec 2024-11-17T01:29:25,662 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:25,662 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:C 2024-11-17T01:29:25,696 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:25,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47494 deadline: 1731807025696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:25,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:25,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47462 deadline: 1731807025700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:25,705 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:25,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47488 deadline: 1731807025705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:25,791 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/2711a853ce2b48118effbcd10bbdae4d 2024-11-17T01:29:25,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/1f42465192a84a8d8220de49c3a3eff7 is 50, key is test_row_0/C:col10/1731806963577/Put/seqid=0 2024-11-17T01:29:25,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742397_1573 (size=12301) 2024-11-17T01:29:25,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-17T01:29:26,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-17T01:29:26,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:47434 deadline: 1731807026156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:26,230 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=272 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/1f42465192a84a8d8220de49c3a3eff7 2024-11-17T01:29:26,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/eaed112e6a9f4007962727bbc230b8e9 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/eaed112e6a9f4007962727bbc230b8e9 2024-11-17T01:29:26,244 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/eaed112e6a9f4007962727bbc230b8e9, entries=150, sequenceid=272, filesize=30.5 K 2024-11-17T01:29:26,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/2711a853ce2b48118effbcd10bbdae4d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/2711a853ce2b48118effbcd10bbdae4d 2024-11-17T01:29:26,249 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/2711a853ce2b48118effbcd10bbdae4d, entries=150, sequenceid=272, filesize=12.0 K 2024-11-17T01:29:26,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/1f42465192a84a8d8220de49c3a3eff7 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1f42465192a84a8d8220de49c3a3eff7 2024-11-17T01:29:26,253 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1f42465192a84a8d8220de49c3a3eff7, entries=150, sequenceid=272, filesize=12.0 K 2024-11-17T01:29:26,254 INFO [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1299ms, sequenceid=272, compaction requested=false 2024-11-17T01:29:26,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:26,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:26,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/04f7e7347dc7:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-17T01:29:26,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-17T01:29:26,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-17T01:29:26,257 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4530 sec 2024-11-17T01:29:26,258 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 1.4570 sec 2024-11-17T01:29:26,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-17T01:29:26,907 INFO [Thread-2220 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-17T01:29:26,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=37721 {}] regionserver.HRegion(8581): Flush requested on 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:26,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-17T01:29:26,987 DEBUG [Thread-2216 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7181df3b to 127.0.0.1:63898 2024-11-17T01:29:26,987 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:26,988 DEBUG [Thread-2216 {}] ipc.AbstractRpcClient(514): Stopping rpc 
client 2024-11-17T01:29:26,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:26,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:26,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:26,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:26,989 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:26,997 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411174c5ef617269d48a590cd0805d9814b3b_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_0/A:col10/1731806966984/Put/seqid=0 2024-11-17T01:29:27,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742398_1574 (size=12454) 2024-11-17T01:29:27,167 DEBUG [Thread-2214 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x05bc9c3e to 127.0.0.1:63898 2024-11-17T01:29:27,167 DEBUG [Thread-2214 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:27,402 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:27,410 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411174c5ef617269d48a590cd0805d9814b3b_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411174c5ef617269d48a590cd0805d9814b3b_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:27,411 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/d8e78e729eae4abdabaa68935b41d798, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:27,412 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/d8e78e729eae4abdabaa68935b41d798 is 175, key is test_row_0/A:col10/1731806966984/Put/seqid=0 2024-11-17T01:29:27,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742399_1575 (size=31255) 2024-11-17T01:29:27,719 DEBUG [Thread-2210 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5765d46a to 127.0.0.1:63898 2024-11-17T01:29:27,719 DEBUG [Thread-2210 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:27,720 DEBUG [Thread-2218 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x11030ef5 to 127.0.0.1:63898 2024-11-17T01:29:27,720 DEBUG [Thread-2218 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:27,723 DEBUG [Thread-2212 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ac53e79 to 127.0.0.1:63898 2024-11-17T01:29:27,723 DEBUG [Thread-2212 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 39 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 34 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6809 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6737 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6601 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6819 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 6741 2024-11-17T01:29:27,723 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-17T01:29:27,723 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-17T01:29:27,724 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1cbd2497 to 127.0.0.1:63898 2024-11-17T01:29:27,724 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:27,724 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-17T01:29:27,725 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-17T01:29:27,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:27,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-17T01:29:27,728 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806967727"}]},"ts":"1731806967727"} 2024-11-17T01:29:27,729 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-17T01:29:27,773 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-17T01:29:27,774 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-17T01:29:27,775 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=173, 
ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, UNASSIGN}] 2024-11-17T01:29:27,775 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, UNASSIGN 2024-11-17T01:29:27,776 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=173 updating hbase:meta row=3f71bb4db1b15a8af73d9278bb8c8221, regionState=CLOSING, regionLocation=04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:27,777 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-17T01:29:27,777 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; CloseRegionProcedure 3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503}] 2024-11-17T01:29:27,818 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=293, memsize=33.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/d8e78e729eae4abdabaa68935b41d798 2024-11-17T01:29:27,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-17T01:29:27,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/10c16775029b4393929d2e6c1c113341 is 50, key is test_row_0/B:col10/1731806966984/Put/seqid=0 2024-11-17T01:29:27,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742400_1576 (size=12301) 2024-11-17T01:29:27,928 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:27,930 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] handler.UnassignRegionHandler(124): Close 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:27,930 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-17T01:29:27,930 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1681): Closing 3f71bb4db1b15a8af73d9278bb8c8221, disabling compactions & flushes 2024-11-17T01:29:27,930 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
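The "Committing .tmp/... as ..." (HRegionFileSystem) and "FLUSH Renaming flushed file from ... to ..." (HMobStore) entries throughout this section reflect a write-to-temporary-then-rename commit: flush and compaction output is first written under the region's .tmp directory and only becomes visible to readers once it is renamed into the store directory. The sketch below illustrates that general pattern with the Hadoop FileSystem API; the paths are placeholders and this is not HBase's HRegionFileSystem code.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpCommitSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Output is first written under the region's .tmp directory (placeholder paths) ...
        Path tmpFile = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/hfile");
        // ... and is published by renaming it into the column family's store directory.
        Path storeFile = new Path("/data/default/TestAcidGuarantees/region/A/hfile");
        if (!fs.rename(tmpFile, storeFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + storeFile);
        }
    }
}
```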
2024-11-17T01:29:28,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-17T01:29:28,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/10c16775029b4393929d2e6c1c113341 2024-11-17T01:29:28,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/2dbd993aef5344a192e66cb5dbee001f is 50, key is test_row_0/C:col10/1731806966984/Put/seqid=0 2024-11-17T01:29:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742401_1577 (size=12301) 2024-11-17T01:29:28,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-17T01:29:28,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/2dbd993aef5344a192e66cb5dbee001f 2024-11-17T01:29:28,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/d8e78e729eae4abdabaa68935b41d798 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/d8e78e729eae4abdabaa68935b41d798 2024-11-17T01:29:28,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/d8e78e729eae4abdabaa68935b41d798, entries=150, sequenceid=293, filesize=30.5 K 2024-11-17T01:29:28,664 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/10c16775029b4393929d2e6c1c113341 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/10c16775029b4393929d2e6c1c113341 2024-11-17T01:29:28,666 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/10c16775029b4393929d2e6c1c113341, entries=150, sequenceid=293, filesize=12.0 K 2024-11-17T01:29:28,667 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/2dbd993aef5344a192e66cb5dbee001f as 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2dbd993aef5344a192e66cb5dbee001f 2024-11-17T01:29:28,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2dbd993aef5344a192e66cb5dbee001f, entries=150, sequenceid=293, filesize=12.0 K 2024-11-17T01:29:28,670 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=26.84 KB/27480 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1683ms, sequenceid=293, compaction requested=true 2024-11-17T01:29:28,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:28,670 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:28,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:A, priority=-2147483648, current under compaction store size is 1 2024-11-17T01:29:28,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:28,670 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:28,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:B, priority=-2147483648, current under compaction store size is 2 2024-11-17T01:29:28,670 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. after waiting 0 ms 2024-11-17T01:29:28,670 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. because compaction request was cancelled 2024-11-17T01:29:28,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-17T01:29:28,670 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:28,670 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 
because compaction request was cancelled 2024-11-17T01:29:28,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3f71bb4db1b15a8af73d9278bb8c8221:C, priority=-2147483648, current under compaction store size is 3 2024-11-17T01:29:28,670 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:A 2024-11-17T01:29:28,670 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-17T01:29:28,670 DEBUG [RS:0;04f7e7347dc7:37721-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:B 2024-11-17T01:29:28,670 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. because compaction request was cancelled 2024-11-17T01:29:28,670 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(2837): Flushing 3f71bb4db1b15a8af73d9278bb8c8221 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-17T01:29:28,670 DEBUG [RS:0;04f7e7347dc7:37721-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3f71bb4db1b15a8af73d9278bb8c8221:C 2024-11-17T01:29:28,671 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=A 2024-11-17T01:29:28,671 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:28,671 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=B 2024-11-17T01:29:28,671 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:28,671 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 3f71bb4db1b15a8af73d9278bb8c8221, store=C 2024-11-17T01:29:28,671 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-17T01:29:28,675 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111774bd8444130d44dcb6e3c002079d7297_3f71bb4db1b15a8af73d9278bb8c8221 is 50, key is test_row_1/A:col10/1731806967716/Put/seqid=0 2024-11-17T01:29:28,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742402_1578 (size=9914) 2024-11-17T01:29:28,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-17T01:29:29,079 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-17T01:29:29,089 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111774bd8444130d44dcb6e3c002079d7297_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111774bd8444130d44dcb6e3c002079d7297_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:29,090 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/1b88ffc5cad94726b868960046649855, store: [table=TestAcidGuarantees family=A region=3f71bb4db1b15a8af73d9278bb8c8221] 2024-11-17T01:29:29,091 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/1b88ffc5cad94726b868960046649855 is 175, key is test_row_1/A:col10/1731806967716/Put/seqid=0 2024-11-17T01:29:29,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742403_1579 (size=22561) 2024-11-17T01:29:29,498 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=300, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/1b88ffc5cad94726b868960046649855 2024-11-17T01:29:29,511 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/6416aeb56fa94f02abbee6488974ac62 is 50, key is test_row_1/B:col10/1731806967716/Put/seqid=0 2024-11-17T01:29:29,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742404_1580 (size=9857) 2024-11-17T01:29:29,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-17T01:29:29,917 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=300 (bloomFilter=true), 
to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/6416aeb56fa94f02abbee6488974ac62 2024-11-17T01:29:29,927 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/1aac97c2a7bd4f7ca991878f1ea89908 is 50, key is test_row_1/C:col10/1731806967716/Put/seqid=0 2024-11-17T01:29:29,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742405_1581 (size=9857) 2024-11-17T01:29:29,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-17T01:29:30,332 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/1aac97c2a7bd4f7ca991878f1ea89908 2024-11-17T01:29:30,339 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/A/1b88ffc5cad94726b868960046649855 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1b88ffc5cad94726b868960046649855 2024-11-17T01:29:30,344 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1b88ffc5cad94726b868960046649855, entries=100, sequenceid=300, filesize=22.0 K 2024-11-17T01:29:30,345 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/B/6416aeb56fa94f02abbee6488974ac62 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/6416aeb56fa94f02abbee6488974ac62 2024-11-17T01:29:30,348 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/6416aeb56fa94f02abbee6488974ac62, entries=100, sequenceid=300, filesize=9.6 K 2024-11-17T01:29:30,349 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/.tmp/C/1aac97c2a7bd4f7ca991878f1ea89908 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1aac97c2a7bd4f7ca991878f1ea89908 2024-11-17T01:29:30,351 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1aac97c2a7bd4f7ca991878f1ea89908, entries=100, sequenceid=300, filesize=9.6 K 2024-11-17T01:29:30,352 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 3f71bb4db1b15a8af73d9278bb8c8221 in 1682ms, sequenceid=300, compaction requested=true 2024-11-17T01:29:30,352 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/11f6e67a6ba14799bb71bc07049dcc2c, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/a6f905cce5594080b78ec93438384b26, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/12f23ea68d3c47b985264408e224ae99, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1bc25b13ede44b97b13f82534bea2c79, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/02ac3ca2b421463ca9e0b5a473938967, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/daf4b569cc504af6913c3406583989e1, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/f2321d5507894ea38d1f3dc30dd23a29, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/669c147adf484c218eacd42506a05438, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/89ec4228e6df4d35aac0ca1d4482a035, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/3a7a0eda355b4e47a48c4803565d27c3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/465cac198c484749ad33f646573a87c7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/84b5a88fa40d4e4889aca4611206b9ea, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/ee7708287aa942669cf855d060f7fac7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/931a437f9154422eb9ee0d3c8cf3cfbb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/de5722378100410e86217ca00e2c3fd9, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/10a4d2ff997e4da1a967a24e95309349, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/b534ae7d127e476d9ce0464bc678f004, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/feee9abdd3e74156900c6eb46d1e498d] to archive 2024-11-17T01:29:30,353 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T01:29:30,355 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/11f6e67a6ba14799bb71bc07049dcc2c to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/11f6e67a6ba14799bb71bc07049dcc2c 2024-11-17T01:29:30,356 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/a6f905cce5594080b78ec93438384b26 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/a6f905cce5594080b78ec93438384b26 2024-11-17T01:29:30,356 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/12f23ea68d3c47b985264408e224ae99 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/12f23ea68d3c47b985264408e224ae99 2024-11-17T01:29:30,357 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1bc25b13ede44b97b13f82534bea2c79 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1bc25b13ede44b97b13f82534bea2c79 2024-11-17T01:29:30,358 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/02ac3ca2b421463ca9e0b5a473938967 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/02ac3ca2b421463ca9e0b5a473938967 2024-11-17T01:29:30,359 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/daf4b569cc504af6913c3406583989e1 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/daf4b569cc504af6913c3406583989e1 2024-11-17T01:29:30,360 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/f2321d5507894ea38d1f3dc30dd23a29 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/f2321d5507894ea38d1f3dc30dd23a29 2024-11-17T01:29:30,360 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/669c147adf484c218eacd42506a05438 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/669c147adf484c218eacd42506a05438 2024-11-17T01:29:30,361 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/89ec4228e6df4d35aac0ca1d4482a035 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/89ec4228e6df4d35aac0ca1d4482a035 2024-11-17T01:29:30,362 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/3a7a0eda355b4e47a48c4803565d27c3 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/3a7a0eda355b4e47a48c4803565d27c3 2024-11-17T01:29:30,363 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/465cac198c484749ad33f646573a87c7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/465cac198c484749ad33f646573a87c7 2024-11-17T01:29:30,363 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/84b5a88fa40d4e4889aca4611206b9ea to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/84b5a88fa40d4e4889aca4611206b9ea 2024-11-17T01:29:30,364 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/ee7708287aa942669cf855d060f7fac7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/ee7708287aa942669cf855d060f7fac7 2024-11-17T01:29:30,365 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/931a437f9154422eb9ee0d3c8cf3cfbb to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/931a437f9154422eb9ee0d3c8cf3cfbb 2024-11-17T01:29:30,365 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/de5722378100410e86217ca00e2c3fd9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/de5722378100410e86217ca00e2c3fd9 2024-11-17T01:29:30,366 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/10a4d2ff997e4da1a967a24e95309349 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/10a4d2ff997e4da1a967a24e95309349 2024-11-17T01:29:30,367 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/b534ae7d127e476d9ce0464bc678f004 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/b534ae7d127e476d9ce0464bc678f004 2024-11-17T01:29:30,367 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/feee9abdd3e74156900c6eb46d1e498d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/feee9abdd3e74156900c6eb46d1e498d 2024-11-17T01:29:30,368 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e8d440bdcd3741458629e36322342c44, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/d358d4eb1cf644d4b871012b080fbe5e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/4fcdb6371111463183916eae7c329892, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/ce85bffea25e47f4a9733671cb214d9d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f4a11c6e21ed4d09a49b5f2fcc0f256e, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5817f111a15f4179b42763bb59aceae4, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/dae306d255764bb2857888eb012f05f2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5ad22ab46d1e49e798cd4f23ecb5565b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f28a408f5a494db0809833b4c541915b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/15344626c1d8424e90bb5fb83275b3e8, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/1afce19dce8c436abbe3be507de1034d, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e250530aeebd4784b3ab465f48345b66, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/bb5bde7114664aa4a48e0f9e130ec51f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/7f621d3c6bd14d8c881312a0c5ea0afa, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/0c5be38ab4414dd18ed2bddc420de1d3, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f14f10a666db4fd5a7377ed5c584a39f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e7b22f338f3849fcb40c66c29d2e1567, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/99a8d902a4b24261ad9f1a5f29903c95] to archive 2024-11-17T01:29:30,369 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T01:29:30,370 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e8d440bdcd3741458629e36322342c44 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e8d440bdcd3741458629e36322342c44 2024-11-17T01:29:30,371 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/d358d4eb1cf644d4b871012b080fbe5e to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/d358d4eb1cf644d4b871012b080fbe5e 2024-11-17T01:29:30,371 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/4fcdb6371111463183916eae7c329892 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/4fcdb6371111463183916eae7c329892 2024-11-17T01:29:30,372 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/ce85bffea25e47f4a9733671cb214d9d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/ce85bffea25e47f4a9733671cb214d9d 2024-11-17T01:29:30,373 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f4a11c6e21ed4d09a49b5f2fcc0f256e to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f4a11c6e21ed4d09a49b5f2fcc0f256e 2024-11-17T01:29:30,373 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5817f111a15f4179b42763bb59aceae4 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5817f111a15f4179b42763bb59aceae4 2024-11-17T01:29:30,374 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/dae306d255764bb2857888eb012f05f2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/dae306d255764bb2857888eb012f05f2 2024-11-17T01:29:30,375 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5ad22ab46d1e49e798cd4f23ecb5565b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/5ad22ab46d1e49e798cd4f23ecb5565b 2024-11-17T01:29:30,375 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f28a408f5a494db0809833b4c541915b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f28a408f5a494db0809833b4c541915b 2024-11-17T01:29:30,376 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/15344626c1d8424e90bb5fb83275b3e8 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/15344626c1d8424e90bb5fb83275b3e8 2024-11-17T01:29:30,377 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/1afce19dce8c436abbe3be507de1034d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/1afce19dce8c436abbe3be507de1034d 2024-11-17T01:29:30,377 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e250530aeebd4784b3ab465f48345b66 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e250530aeebd4784b3ab465f48345b66 2024-11-17T01:29:30,378 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/bb5bde7114664aa4a48e0f9e130ec51f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/bb5bde7114664aa4a48e0f9e130ec51f 2024-11-17T01:29:30,379 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/7f621d3c6bd14d8c881312a0c5ea0afa to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/7f621d3c6bd14d8c881312a0c5ea0afa 2024-11-17T01:29:30,379 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/0c5be38ab4414dd18ed2bddc420de1d3 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/0c5be38ab4414dd18ed2bddc420de1d3 2024-11-17T01:29:30,380 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f14f10a666db4fd5a7377ed5c584a39f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/f14f10a666db4fd5a7377ed5c584a39f 2024-11-17T01:29:30,381 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e7b22f338f3849fcb40c66c29d2e1567 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/e7b22f338f3849fcb40c66c29d2e1567 2024-11-17T01:29:30,381 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/99a8d902a4b24261ad9f1a5f29903c95 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/99a8d902a4b24261ad9f1a5f29903c95 2024-11-17T01:29:30,382 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/10714605d6b245e1a44dfd4a9321a134, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1735df6551304dcca4540fe0cb0ddfb2, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/ceee3466e3ab4951a86a09e9bed08e5f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/36783b211a274dc0becbf1db87535d0f, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/5eda42ba3cb3434eb143bd6e57ee8b60, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/c953c60bb5164f4f9e8ada2ae4248be7, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/713eaa5217f94dfeab4c4c9b717ca345, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/6e73940d1b4240f68cfecef1b72f4cde, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/baf19623be4447868df6e0826540e61b, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/a35d475198b942c5920b138a7bd4447a, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2a77d674bc044f059b1e61d14d83d6fa, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/306d3feadf3d43afadd075ad0eb263cb, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/e14f6c3acd044f8f9370926b4aa88919, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/3b26f5b6b8d640b2b4875a781c7a1b75, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/58120f85fd4541608d3f097d4d5f2d32, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/251f1ad348c54b1f878302c467e12801, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/34a6661b51e4415496ad8c8030b041ad, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/bc0c0309f4bf45c197d100aa1cd10fb6] to archive 2024-11-17T01:29:30,382 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-17T01:29:30,383 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/10714605d6b245e1a44dfd4a9321a134 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/10714605d6b245e1a44dfd4a9321a134 2024-11-17T01:29:30,384 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1735df6551304dcca4540fe0cb0ddfb2 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1735df6551304dcca4540fe0cb0ddfb2 2024-11-17T01:29:30,385 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/ceee3466e3ab4951a86a09e9bed08e5f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/ceee3466e3ab4951a86a09e9bed08e5f 2024-11-17T01:29:30,386 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/36783b211a274dc0becbf1db87535d0f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/36783b211a274dc0becbf1db87535d0f 2024-11-17T01:29:30,386 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/5eda42ba3cb3434eb143bd6e57ee8b60 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/5eda42ba3cb3434eb143bd6e57ee8b60 2024-11-17T01:29:30,387 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/c953c60bb5164f4f9e8ada2ae4248be7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/c953c60bb5164f4f9e8ada2ae4248be7 2024-11-17T01:29:30,387 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/713eaa5217f94dfeab4c4c9b717ca345 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/713eaa5217f94dfeab4c4c9b717ca345 2024-11-17T01:29:30,388 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/6e73940d1b4240f68cfecef1b72f4cde to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/6e73940d1b4240f68cfecef1b72f4cde 2024-11-17T01:29:30,389 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/baf19623be4447868df6e0826540e61b to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/baf19623be4447868df6e0826540e61b 2024-11-17T01:29:30,389 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/a35d475198b942c5920b138a7bd4447a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/a35d475198b942c5920b138a7bd4447a 2024-11-17T01:29:30,390 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2a77d674bc044f059b1e61d14d83d6fa to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2a77d674bc044f059b1e61d14d83d6fa 2024-11-17T01:29:30,390 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/306d3feadf3d43afadd075ad0eb263cb to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/306d3feadf3d43afadd075ad0eb263cb 2024-11-17T01:29:30,391 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/e14f6c3acd044f8f9370926b4aa88919 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/e14f6c3acd044f8f9370926b4aa88919 2024-11-17T01:29:30,393 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/3b26f5b6b8d640b2b4875a781c7a1b75 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/3b26f5b6b8d640b2b4875a781c7a1b75 2024-11-17T01:29:30,395 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/58120f85fd4541608d3f097d4d5f2d32 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/58120f85fd4541608d3f097d4d5f2d32 2024-11-17T01:29:30,395 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/251f1ad348c54b1f878302c467e12801 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/251f1ad348c54b1f878302c467e12801 2024-11-17T01:29:30,397 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/34a6661b51e4415496ad8c8030b041ad to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/34a6661b51e4415496ad8c8030b041ad 2024-11-17T01:29:30,398 DEBUG [StoreCloser-TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/bc0c0309f4bf45c197d100aa1cd10fb6 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/bc0c0309f4bf45c197d100aa1cd10fb6 2024-11-17T01:29:30,402 DEBUG 
[RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/recovered.edits/303.seqid, newMaxSeqId=303, maxSeqId=4 2024-11-17T01:29:30,402 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221. 2024-11-17T01:29:30,402 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] regionserver.HRegion(1635): Region close journal for 3f71bb4db1b15a8af73d9278bb8c8221: 2024-11-17T01:29:30,404 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION, pid=174}] handler.UnassignRegionHandler(170): Closed 3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:30,404 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=173 updating hbase:meta row=3f71bb4db1b15a8af73d9278bb8c8221, regionState=CLOSED 2024-11-17T01:29:30,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-17T01:29:30,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseRegionProcedure 3f71bb4db1b15a8af73d9278bb8c8221, server=04f7e7347dc7,37721,1731806791503 in 2.6280 sec 2024-11-17T01:29:30,408 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=173, resume processing ppid=172 2024-11-17T01:29:30,408 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, ppid=172, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=3f71bb4db1b15a8af73d9278bb8c8221, UNASSIGN in 2.6310 sec 2024-11-17T01:29:30,409 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-17T01:29:30,409 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.6340 sec 2024-11-17T01:29:30,410 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731806970410"}]},"ts":"1731806970410"} 2024-11-17T01:29:30,411 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-17T01:29:30,448 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-17T01:29:30,451 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.7240 sec 2024-11-17T01:29:31,174 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-17T01:29:31,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-17T01:29:31,836 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-17T01:29:31,838 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-17T01:29:31,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:31,841 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=175, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:31,843 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=175, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:31,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-17T01:29:31,845 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,850 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C, FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/recovered.edits] 2024-11-17T01:29:31,855 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1b88ffc5cad94726b868960046649855 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/1b88ffc5cad94726b868960046649855 2024-11-17T01:29:31,857 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/6454ba61365d438392e56d578f4aa366 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/6454ba61365d438392e56d578f4aa366 2024-11-17T01:29:31,859 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/d8e78e729eae4abdabaa68935b41d798 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/d8e78e729eae4abdabaa68935b41d798 2024-11-17T01:29:31,861 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/eaed112e6a9f4007962727bbc230b8e9 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/A/eaed112e6a9f4007962727bbc230b8e9 2024-11-17T01:29:31,864 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/10c16775029b4393929d2e6c1c113341 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/10c16775029b4393929d2e6c1c113341 2024-11-17T01:29:31,866 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/2711a853ce2b48118effbcd10bbdae4d to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/2711a853ce2b48118effbcd10bbdae4d 2024-11-17T01:29:31,867 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/6416aeb56fa94f02abbee6488974ac62 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/6416aeb56fa94f02abbee6488974ac62 2024-11-17T01:29:31,868 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/a56cd65aded9463abe141226b4f0f041 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/B/a56cd65aded9463abe141226b4f0f041 2024-11-17T01:29:31,869 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/196bdf2924204955b6e3378740d0bb1a to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/196bdf2924204955b6e3378740d0bb1a 2024-11-17T01:29:31,870 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1aac97c2a7bd4f7ca991878f1ea89908 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1aac97c2a7bd4f7ca991878f1ea89908 2024-11-17T01:29:31,870 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1f42465192a84a8d8220de49c3a3eff7 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/1f42465192a84a8d8220de49c3a3eff7 2024-11-17T01:29:31,871 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2dbd993aef5344a192e66cb5dbee001f to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/C/2dbd993aef5344a192e66cb5dbee001f 2024-11-17T01:29:31,873 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/recovered.edits/303.seqid to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221/recovered.edits/303.seqid 2024-11-17T01:29:31,873 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/default/TestAcidGuarantees/3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,873 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-17T01:29:31,873 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-17T01:29:31,874 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-17T01:29:31,876 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111715a9807cf34a46bab01926a2c208c16e_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111715a9807cf34a46bab01926a2c208c16e_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,876 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172ba75c23183f49998bf682d374d4d6f9_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172ba75c23183f49998bf682d374d4d6f9_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,877 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172e2be5597cb74bcb9930ad9afbed2318_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411172e2be5597cb74bcb9930ad9afbed2318_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,878 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411173ca97292768f4148af5d87c63b2120ca_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411173ca97292768f4148af5d87c63b2120ca_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,879 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117482418fd8ad3454ea55741d305358b40_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117482418fd8ad3454ea55741d305358b40_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,880 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411174c5ef617269d48a590cd0805d9814b3b_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411174c5ef617269d48a590cd0805d9814b3b_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,881 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111768912b4298594c58a49cbe9541d56c7c_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111768912b4298594c58a49cbe9541d56c7c_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,882 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111774bd8444130d44dcb6e3c002079d7297_3f71bb4db1b15a8af73d9278bb8c8221 to 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111774bd8444130d44dcb6e3c002079d7297_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,882 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411178845059692114f3ab53c367cba7ca496_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411178845059692114f3ab53c367cba7ca496_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,883 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117902649763f174a6584261c33c87af170_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117902649763f174a6584261c33c87af170_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,884 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117c69db0b11961417ca2150e56b88cc075_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117c69db0b11961417ca2150e56b88cc075_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,885 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117dd2626ebedb54897991562b27b06ada7_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117dd2626ebedb54897991562b27b06ada7_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,886 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117e423709eb4eb48b0a64a50099c556d44_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117e423709eb4eb48b0a64a50099c556d44_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,887 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117ea9904623e1a49729cde2ca0aba76b51_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117ea9904623e1a49729cde2ca0aba76b51_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,888 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117ee2d6c332b9a42c383a428e669ebbac2_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117ee2d6c332b9a42c383a428e669ebbac2_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,889 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f81da28156dc4fd3ab1594c01dcdc8b8_3f71bb4db1b15a8af73d9278bb8c8221 to hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241117f81da28156dc4fd3ab1594c01dcdc8b8_3f71bb4db1b15a8af73d9278bb8c8221 2024-11-17T01:29:31,889 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-17T01:29:31,891 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=175, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:31,892 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-17T01:29:31,894 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-17T01:29:31,895 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=175, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:31,895 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-17T01:29:31,895 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731806971895"}]},"ts":"9223372036854775807"} 2024-11-17T01:29:31,896 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-17T01:29:31,896 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 3f71bb4db1b15a8af73d9278bb8c8221, NAME => 'TestAcidGuarantees,,1731806941907.3f71bb4db1b15a8af73d9278bb8c8221.', STARTKEY => '', ENDKEY => ''}] 2024-11-17T01:29:31,896 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-17T01:29:31,896 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731806971896"}]},"ts":"9223372036854775807"} 2024-11-17T01:29:31,897 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-17T01:29:31,941 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=175, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-17T01:29:31,942 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 103 msec 2024-11-17T01:29:31,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33741 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-17T01:29:31,944 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-17T01:29:31,958 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=239 (was 239), OpenFileDescriptor=456 (was 449) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=257 (was 298), ProcessCount=11 (was 11), AvailableMemoryMB=3827 (was 3843) 2024-11-17T01:29:31,958 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-17T01:29:31,958 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-17T01:29:31,958 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3771e354 to 127.0.0.1:63898 2024-11-17T01:29:31,958 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:31,958 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-17T01:29:31,958 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=634853251, stopped=false 2024-11-17T01:29:31,959 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=04f7e7347dc7,33741,1731806790757 2024-11-17T01:29:31,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T01:29:31,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-17T01:29:31,965 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-17T01:29:31,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:29:31,965 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:29:31,965 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:31,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T01:29:31,966 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-17T01:29:31,966 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '04f7e7347dc7,37721,1731806791503' ***** 2024-11-17T01:29:31,966 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-17T01:29:31,966 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-17T01:29:31,966 INFO [RS:0;04f7e7347dc7:37721 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-17T01:29:31,966 INFO [RS:0;04f7e7347dc7:37721 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-17T01:29:31,966 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-17T01:29:31,966 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(3579): Received CLOSE for 60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:29:31,967 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1224): stopping server 04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:31,967 DEBUG [RS:0;04f7e7347dc7:37721 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:31,967 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-17T01:29:31,967 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-17T01:29:31,967 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-17T01:29:31,967 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-17T01:29:31,967 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 60ebde759866ffdd749c0e1b676599ae, disabling compactions & flushes 2024-11-17T01:29:31,967 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 2024-11-17T01:29:31,967 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-17T01:29:31,967 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 2024-11-17T01:29:31,967 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740, 60ebde759866ffdd749c0e1b676599ae=hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae.} 2024-11-17T01:29:31,967 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. after waiting 0 ms 2024-11-17T01:29:31,967 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 
2024-11-17T01:29:31,967 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-17T01:29:31,967 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-17T01:29:31,967 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 60ebde759866ffdd749c0e1b676599ae 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-17T01:29:31,967 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-17T01:29:31,967 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-17T01:29:31,967 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-17T01:29:31,968 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-17T01:29:31,968 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:29:31,989 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/namespace/60ebde759866ffdd749c0e1b676599ae/.tmp/info/b9894fb52b03469a808b46d51779249d is 45, key is default/info:d/1731806797188/Put/seqid=0 2024-11-17T01:29:31,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742406_1582 (size=5037) 2024-11-17T01:29:31,995 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/.tmp/info/93a394391d964e9b8a30ea3888733d8c is 143, key is hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae./info:regioninfo/1731806796990/Put/seqid=0 2024-11-17T01:29:31,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742407_1583 (size=7725) 2024-11-17T01:29:32,032 INFO [regionserver/04f7e7347dc7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T01:29:32,168 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:29:32,357 INFO [regionserver/04f7e7347dc7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-17T01:29:32,357 INFO [regionserver/04f7e7347dc7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-17T01:29:32,368 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 60ebde759866ffdd749c0e1b676599ae 2024-11-17T01:29:32,394 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): 
Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/namespace/60ebde759866ffdd749c0e1b676599ae/.tmp/info/b9894fb52b03469a808b46d51779249d 2024-11-17T01:29:32,400 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/.tmp/info/93a394391d964e9b8a30ea3888733d8c 2024-11-17T01:29:32,402 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/namespace/60ebde759866ffdd749c0e1b676599ae/.tmp/info/b9894fb52b03469a808b46d51779249d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/namespace/60ebde759866ffdd749c0e1b676599ae/info/b9894fb52b03469a808b46d51779249d 2024-11-17T01:29:32,407 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/namespace/60ebde759866ffdd749c0e1b676599ae/info/b9894fb52b03469a808b46d51779249d, entries=2, sequenceid=6, filesize=4.9 K 2024-11-17T01:29:32,408 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 60ebde759866ffdd749c0e1b676599ae in 441ms, sequenceid=6, compaction requested=false 2024-11-17T01:29:32,412 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/namespace/60ebde759866ffdd749c0e1b676599ae/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-17T01:29:32,413 INFO [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 2024-11-17T01:29:32,413 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 60ebde759866ffdd749c0e1b676599ae: 2024-11-17T01:29:32,413 DEBUG [RS_CLOSE_REGION-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1731806795685.60ebde759866ffdd749c0e1b676599ae. 
2024-11-17T01:29:32,423 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/.tmp/rep_barrier/22dfe61cb85e41a1ad37d1d29db6715f is 102, key is TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7./rep_barrier:/1731806830559/DeleteFamily/seqid=0 2024-11-17T01:29:32,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742408_1584 (size=6025) 2024-11-17T01:29:32,569 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-17T01:29:32,769 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-17T01:29:32,828 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/.tmp/rep_barrier/22dfe61cb85e41a1ad37d1d29db6715f 2024-11-17T01:29:32,851 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/.tmp/table/579f9d17cbb24054b277107af34d14f5 is 96, key is TestAcidGuarantees,,1731806797445.733401ed1ccb71c159e3f227c30cedc7./table:/1731806830559/DeleteFamily/seqid=0 2024-11-17T01:29:32,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742409_1585 (size=5942) 2024-11-17T01:29:32,969 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-11-17T01:29:32,969 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1603): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-17T01:29:32,970 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-17T01:29:33,170 DEBUG [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-17T01:29:33,255 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/.tmp/table/579f9d17cbb24054b277107af34d14f5 2024-11-17T01:29:33,265 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/.tmp/info/93a394391d964e9b8a30ea3888733d8c as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/info/93a394391d964e9b8a30ea3888733d8c 2024-11-17T01:29:33,272 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/info/93a394391d964e9b8a30ea3888733d8c, entries=22, sequenceid=93, filesize=7.5 K 2024-11-17T01:29:33,273 DEBUG 
[RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/.tmp/rep_barrier/22dfe61cb85e41a1ad37d1d29db6715f as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/rep_barrier/22dfe61cb85e41a1ad37d1d29db6715f 2024-11-17T01:29:33,276 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/rep_barrier/22dfe61cb85e41a1ad37d1d29db6715f, entries=6, sequenceid=93, filesize=5.9 K 2024-11-17T01:29:33,277 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/.tmp/table/579f9d17cbb24054b277107af34d14f5 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/table/579f9d17cbb24054b277107af34d14f5 2024-11-17T01:29:33,280 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/table/579f9d17cbb24054b277107af34d14f5, entries=9, sequenceid=93, filesize=5.8 K 2024-11-17T01:29:33,281 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 1314ms, sequenceid=93, compaction requested=false 2024-11-17T01:29:33,285 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-17T01:29:33,286 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-17T01:29:33,286 INFO [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-17T01:29:33,286 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-17T01:29:33,286 DEBUG [RS_CLOSE_META-regionserver/04f7e7347dc7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-17T01:29:33,370 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1250): stopping server 04f7e7347dc7,37721,1731806791503; all regions closed. 
2024-11-17T01:29:33,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741834_1010 (size=26050) 2024-11-17T01:29:33,381 DEBUG [RS:0;04f7e7347dc7:37721 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/oldWALs 2024-11-17T01:29:33,381 INFO [RS:0;04f7e7347dc7:37721 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 04f7e7347dc7%2C37721%2C1731806791503.meta:.meta(num 1731806795426) 2024-11-17T01:29:33,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741832_1008 (size=13136034) 2024-11-17T01:29:33,386 DEBUG [RS:0;04f7e7347dc7:37721 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/oldWALs 2024-11-17T01:29:33,386 INFO [RS:0;04f7e7347dc7:37721 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 04f7e7347dc7%2C37721%2C1731806791503:(num 1731806794521) 2024-11-17T01:29:33,386 DEBUG [RS:0;04f7e7347dc7:37721 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:33,386 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.LeaseManager(133): Closed leases 2024-11-17T01:29:33,386 INFO [RS:0;04f7e7347dc7:37721 {}] hbase.ChoreService(370): Chore service for: regionserver/04f7e7347dc7:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS] on shutdown 2024-11-17T01:29:33,386 INFO [regionserver/04f7e7347dc7:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-17T01:29:33,387 INFO [RS:0;04f7e7347dc7:37721 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:37721 2024-11-17T01:29:33,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/04f7e7347dc7,37721,1731806791503 2024-11-17T01:29:33,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-17T01:29:33,432 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007fedc08f3790@1637f5a4 rejected from java.util.concurrent.ThreadPoolExecutor@53356906[Terminated, pool size = 0, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-17T01:29:33,440 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [04f7e7347dc7,37721,1731806791503] 2024-11-17T01:29:33,440 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 04f7e7347dc7,37721,1731806791503; numProcessing=1 2024-11-17T01:29:33,448 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/04f7e7347dc7,37721,1731806791503 already deleted, retry=false 2024-11-17T01:29:33,448 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 04f7e7347dc7,37721,1731806791503 expired; onlineServers=0 2024-11-17T01:29:33,448 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '04f7e7347dc7,33741,1731806790757' ***** 2024-11-17T01:29:33,448 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-17T01:29:33,449 DEBUG [M:0;04f7e7347dc7:33741 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b81c190, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=04f7e7347dc7/172.17.0.2:0 2024-11-17T01:29:33,449 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HRegionServer(1224): stopping server 04f7e7347dc7,33741,1731806790757 2024-11-17T01:29:33,449 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HRegionServer(1250): stopping server 04f7e7347dc7,33741,1731806790757; all regions closed. 2024-11-17T01:29:33,449 DEBUG [M:0;04f7e7347dc7:33741 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-17T01:29:33,449 DEBUG [M:0;04f7e7347dc7:33741 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-17T01:29:33,449 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-17T01:29:33,449 DEBUG [M:0;04f7e7347dc7:33741 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-17T01:29:33,449 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster-HFileCleaner.large.0-1731806794145 {}] cleaner.HFileCleaner(306): Exit Thread[master/04f7e7347dc7:0:becomeActiveMaster-HFileCleaner.large.0-1731806794145,5,FailOnTimeoutGroup] 2024-11-17T01:29:33,449 DEBUG [master/04f7e7347dc7:0:becomeActiveMaster-HFileCleaner.small.0-1731806794148 {}] cleaner.HFileCleaner(306): Exit Thread[master/04f7e7347dc7:0:becomeActiveMaster-HFileCleaner.small.0-1731806794148,5,FailOnTimeoutGroup] 2024-11-17T01:29:33,450 INFO [M:0;04f7e7347dc7:33741 {}] hbase.ChoreService(370): Chore service for: master/04f7e7347dc7:0 had [] on shutdown 2024-11-17T01:29:33,450 DEBUG [M:0;04f7e7347dc7:33741 {}] master.HMaster(1733): Stopping service threads 2024-11-17T01:29:33,450 INFO [M:0;04f7e7347dc7:33741 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-17T01:29:33,450 ERROR [M:0;04f7e7347dc7:33741 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-17T01:29:33,451 INFO [M:0;04f7e7347dc7:33741 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-17T01:29:33,452 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-17T01:29:33,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-17T01:29:33,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-17T01:29:33,457 DEBUG [M:0;04f7e7347dc7:33741 {}] zookeeper.ZKUtil(347): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-17T01:29:33,457 WARN [M:0;04f7e7347dc7:33741 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-17T01:29:33,457 INFO [M:0;04f7e7347dc7:33741 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-17T01:29:33,457 INFO [M:0;04f7e7347dc7:33741 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-17T01:29:33,457 DEBUG [M:0;04f7e7347dc7:33741 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-17T01:29:33,458 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T01:29:33,458 DEBUG [M:0;04f7e7347dc7:33741 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T01:29:33,458 DEBUG [M:0;04f7e7347dc7:33741 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
after waiting 0 ms 2024-11-17T01:29:33,458 DEBUG [M:0;04f7e7347dc7:33741 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T01:29:33,458 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-17T01:29:33,458 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=730.98 KB heapSize=895.55 KB 2024-11-17T01:29:33,478 DEBUG [M:0;04f7e7347dc7:33741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/836b461391a84bc9891ef3446bea5959 is 82, key is hbase:meta,,1/info:regioninfo/1731806795544/Put/seqid=0 2024-11-17T01:29:33,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742410_1586 (size=5672) 2024-11-17T01:29:33,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T01:29:33,540 INFO [RS:0;04f7e7347dc7:37721 {}] regionserver.HRegionServer(1307): Exiting; stopping=04f7e7347dc7,37721,1731806791503; zookeeper connection closed. 2024-11-17T01:29:33,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37721-0x10147b023e30001, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T01:29:33,541 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e3b510 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e3b510 2024-11-17T01:29:33,542 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-17T01:29:33,883 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2014 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/836b461391a84bc9891ef3446bea5959 2024-11-17T01:29:33,919 DEBUG [M:0;04f7e7347dc7:33741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f2c5e776d4324af58669927508299d0d is 2278, key is \x00\x00\x00\x00\x00\x00\x00\x98/proc:d/1731806945039/Put/seqid=0 2024-11-17T01:29:33,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742411_1587 (size=44392) 2024-11-17T01:29:34,324 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=730.43 KB at sequenceid=2014 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f2c5e776d4324af58669927508299d0d 2024-11-17T01:29:34,333 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 
f2c5e776d4324af58669927508299d0d 2024-11-17T01:29:34,350 DEBUG [M:0;04f7e7347dc7:33741 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ce63b3700bdf45ceb6b261a10d8445cb is 69, key is 04f7e7347dc7,37721,1731806791503/rs:state/1731806794287/Put/seqid=0 2024-11-17T01:29:34,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073742412_1588 (size=5156) 2024-11-17T01:29:34,754 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2014 (bloomFilter=true), to=hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ce63b3700bdf45ceb6b261a10d8445cb 2024-11-17T01:29:34,765 DEBUG [M:0;04f7e7347dc7:33741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/836b461391a84bc9891ef3446bea5959 as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/836b461391a84bc9891ef3446bea5959 2024-11-17T01:29:34,767 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/836b461391a84bc9891ef3446bea5959, entries=8, sequenceid=2014, filesize=5.5 K 2024-11-17T01:29:34,768 DEBUG [M:0;04f7e7347dc7:33741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f2c5e776d4324af58669927508299d0d as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f2c5e776d4324af58669927508299d0d 2024-11-17T01:29:34,770 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f2c5e776d4324af58669927508299d0d 2024-11-17T01:29:34,770 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f2c5e776d4324af58669927508299d0d, entries=175, sequenceid=2014, filesize=43.4 K 2024-11-17T01:29:34,771 DEBUG [M:0;04f7e7347dc7:33741 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ce63b3700bdf45ceb6b261a10d8445cb as hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ce63b3700bdf45ceb6b261a10d8445cb 2024-11-17T01:29:34,773 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:37251/user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ce63b3700bdf45ceb6b261a10d8445cb, entries=1, 
sequenceid=2014, filesize=5.0 K 2024-11-17T01:29:34,773 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HRegion(3040): Finished flush of dataSize ~730.98 KB/748526, heapSize ~895.26 KB/916744, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 1315ms, sequenceid=2014, compaction requested=false 2024-11-17T01:29:34,774 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-17T01:29:34,774 DEBUG [M:0;04f7e7347dc7:33741 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-17T01:29:34,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40381 is added to blk_1073741830_1006 (size=880659) 2024-11-17T01:29:34,776 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(743): complete file /user/jenkins/test-data/71a1396b-0ef7-be6c-e6b1-dd06ea504f50/MasterData/WALs/04f7e7347dc7,33741,1731806790757/04f7e7347dc7%2C33741%2C1731806790757.1731806793544 not finished, retry = 0 2024-11-17T01:29:34,877 INFO [M:0;04f7e7347dc7:33741 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-17T01:29:34,877 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-17T01:29:34,877 INFO [M:0;04f7e7347dc7:33741 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33741 2024-11-17T01:29:34,890 DEBUG [M:0;04f7e7347dc7:33741 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/04f7e7347dc7,33741,1731806790757 already deleted, retry=false 2024-11-17T01:29:34,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T01:29:34,999 INFO [M:0;04f7e7347dc7:33741 {}] regionserver.HRegionServer(1307): Exiting; stopping=04f7e7347dc7,33741,1731806790757; zookeeper connection closed. 
2024-11-17T01:29:34,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33741-0x10147b023e30000, quorum=127.0.0.1:63898, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-17T01:29:35,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10ba49e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-17T01:29:35,013 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T01:29:35,013 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T01:29:35,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T01:29:35,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/hadoop.log.dir/,STOPPED} 2024-11-17T01:29:35,017 WARN [BP-349443756-172.17.0.2-1731806787529 heartbeating to localhost/127.0.0.1:37251 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-17T01:29:35,017 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-17T01:29:35,017 WARN [BP-349443756-172.17.0.2-1731806787529 heartbeating to localhost/127.0.0.1:37251 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-349443756-172.17.0.2-1731806787529 (Datanode Uuid 179fcf81-5be3-4b56-a246-03f9dcc7c88c) service to localhost/127.0.0.1:37251 2024-11-17T01:29:35,017 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-17T01:29:35,020 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841/dfs/data/data1/current/BP-349443756-172.17.0.2-1731806787529 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T01:29:35,020 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/cluster_a29a2f8e-53ea-aed3-f5f5-dfe0b4ad4841/dfs/data/data2/current/BP-349443756-172.17.0.2-1731806787529 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-17T01:29:35,020 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-17T01:29:35,026 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-17T01:29:35,027 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-17T01:29:35,027 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-17T01:29:35,027 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-17T01:29:35,027 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/5f153a27-2217-ea90-c1a7-3f4369d30d6b/hadoop.log.dir/,STOPPED} 2024-11-17T01:29:35,043 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-17T01:29:35,157 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down